id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3519081 | import numpy as np
import pandas as pd
from enlopy.utils import make_timeseries
from enlopy.generate import (add_noise, gen_daily_stoch_el, gen_load_from_daily_monthly, gen_load_sinus, gen_demand_response,
disag_upsample, clean_convert, countweekend_days_per_month,
gen_analytical_LDC, gen_load_from_LDC, gen_corr_arrays, gen_gauss_markov)
class Test_noise():
    """Smoke tests for the noise-generation helpers of enlopy.generate."""

    def test_ndarray_gauss(self):
        levels = np.random.rand(24)
        sigmas = np.random.rand(24) / 10
        series = gen_gauss_markov(levels, sigmas, .9)
        assert isinstance(series, np.ndarray)

    def test_ndarray_add_noise_gauss(self):
        clean = np.random.rand(8760)
        noisy = add_noise(clean, 3, 0.05)  # mode 3: Gauss-Markov noise
        assert isinstance(noisy, pd.Series)

    def test_2d_ndarray_add_noise_gauss(self):
        clean = np.random.rand(8760, 2)
        noisy = add_noise(clean, 3, 0.05)  # mode 3: Gauss-Markov noise
        assert isinstance(noisy, pd.DataFrame)
        assert noisy.shape == (8760, 2)

    def test_ndarray_add_noise_normal(self):
        clean = np.random.rand(8760)
        noisy = add_noise(clean, 1, 0.05)  # mode 1 (per test name: normal noise)
        assert isinstance(noisy, pd.Series)

    def test_2d_ndarray_add_noise_normal(self):
        clean = np.random.rand(8760, 2)
        noisy = add_noise(clean, 1, 0.05)  # mode 1 (per test name: normal noise)
        assert isinstance(noisy, pd.DataFrame)
        assert noisy.shape == (8760, 2)

    def test_ndarray_add_noise_uniform(self):
        clean = np.random.rand(8760)
        noisy = add_noise(clean, 2, 0.05)  # mode 2 (per test name: uniform noise)
        assert isinstance(noisy, pd.Series)

    def test_2d_ndarray_add_noise_uniform(self):
        clean = np.random.rand(8760, 2)
        noisy = add_noise(clean, 2, 0.05)  # mode 2 (per test name: uniform noise)
        assert isinstance(noisy, pd.DataFrame)
        assert noisy.shape == (8760, 2)

    def test_add_noise_not_annual(self):
        # Inputs shorter than a full year must also be accepted.
        clean = np.random.rand(15)
        noisy = add_noise(clean, 3, 0.05)
        assert isinstance(noisy, pd.Series)
class Test_gen_monthly_daily():
    """An annual profile built from monthly totals and daily shapes must
    have 8760 values and conserve the total energy."""

    def test_gen_monthly_daily(self):
        weight = .55  # ratio between working and non-working day load (e.g. 70% - 30%)
        monthly = 1000 * np.ones(12)  # monthly load
        working_day = np.random.rand(24) * 10
        working_day = working_day / working_day.sum()  # normalized working-day profile
        nonworking_day = np.random.rand(24) * 5
        nonworking_day = nonworking_day / nonworking_day.sum()  # normalized non-working-day profile
        year = 2014
        load = gen_load_from_daily_monthly(monthly, working_day, nonworking_day, weight, year)
        assert len(load) == 8760
        assert np.isclose(load.sum(), np.sum(monthly))
class Test_gen_dummy_load():
    """gen_daily_stoch_el must return a single stochastic day (24 values)."""

    def test_gen_dummy(self):
        day = gen_daily_stoch_el(1500)
        assert isinstance(day, np.ndarray)
        assert len(day) == 24
class Test_gen_load_sinus():
    """A sinusoidal synthetic load must span a full year of hours."""

    def test_gen_sinus(self):
        load = gen_load_sinus(1, 2, 3, 4, 5, 6)
        assert len(load) == 8760
class Test_disag():
    """Disaggregation (upsampling) must conserve the total energy."""

    def test_disag_daily_to_hourly(self):
        days = np.arange(0, 365)
        daily = np.cos(2 * np.pi / 364 * days) * 50 + 100
        daily = make_timeseries(daily, freq='d')
        profile = np.random.rand(24)
        hourly = disag_upsample(daily, profile)
        assert np.isclose(np.sum(hourly), np.sum(daily))  # energy conserved
        assert len(hourly) == 8760

    def test_disag_hourly_to_minutes(self):
        hours = np.arange(0, 8760)
        hourly = np.cos(2 * np.pi / 8759 * hours) * 50 + 100
        hourly = make_timeseries(hourly, freq='h')
        profile = np.random.rand(60)
        minutely = disag_upsample(hourly, profile, to_offset='t')
        assert np.isclose(np.sum(minutely), np.sum(hourly))  # energy conserved
        assert len(minutely) == 8760 * 60
class Test_demand_side_management():
    """Demand response must shave the peak while conserving total energy."""

    def test_load_shifting_small(self):
        load = np.random.rand(8760) * 100
        load = clean_convert(load, force_timed_index=True, always_df=False)
        shifted = gen_demand_response(load, .1, .2)
        assert np.isclose(np.sum(load), np.sum(shifted))
        assert np.max(load) > np.max(shifted)

    def test_load_shifting_big(self):
        load = np.random.rand(8760) * 100
        load = clean_convert(load, force_timed_index=True, always_df=False)
        shifted = gen_demand_response(load, .15, .5)
        assert np.isclose(np.sum(load), np.sum(shifted))
        assert np.max(load) > np.max(shifted)
def test_countweekend_days_per_month():
    """2015 has 104 weekend days; the 12 monthly counts must sum to that."""
    ts = make_timeseries(year=2015, length=8760, freq='h')
    per_month = countweekend_days_per_month(ts.resample('d').mean())
    assert len(per_month) == 12
    assert sum(per_month) == 104  # weekend days in 2015
def test_gen_analytical_LDC():
    """A simple analytical LDC with peak 1 and base 0 must average to 0.5."""
    params = (1, 0.5, 0.2, 8760)
    ldc = gen_analytical_LDC(params)
    assert max(ldc[0]) == 1.0
    assert min(ldc[0]) == 0.0
    assert np.isclose(np.mean(ldc[0]), 0.5)
def test_gen_load_from_LDC():
    """A load sampled from an LDC that only operates 90% of the time must be
    zero below the corresponding percentile."""
    duration_fraction = 0.9
    ldc = gen_analytical_LDC((1, 0.5, 0.2, 8760 * duration_fraction))
    load = gen_load_from_LDC(ldc)
    assert load.max() <= 1.0
    # According to the defined formula anything below should be zero.
    low_percentile = np.percentile(load, (1 - duration_fraction - 0.01) * 100)
    assert np.isclose(low_percentile, 0.0)
def test_gen_corr_arrays():
    """Two generated series should reproduce the requested correlation."""
    Na = 2  # number of series to generate
    length = 1000
    r = 0.85  # target pairwise correlation
    M = np.array([[1, r],
                  [r, 1]])  # desired correlation matrix
    A = gen_corr_arrays(Na, length, M)
    new_r = np.corrcoef(A)[0][1]  # realised correlation of the output
    assert A.shape == (Na, length)
    #allow some tolerance of convergence..
    assert np.abs(new_r - r) <= 0.03 | StarcoderdataPython |
41360 | <filename>random_geometry_points_service/app.py
"""Entry module that inits the flask app.
"""
from flask import Flask
from .endpoints.api import init_api
def create_app():
    """Create the flask app and initialize the random geometry points api.

    Returns:
        Flask: The initialized flask app
    """
    app = Flask(__name__)
    # Wire the REST api onto the freshly created application instance.
    init_api().init_app(app)
    return app
| StarcoderdataPython |
3577150 | import os, sys
# Make sure the package directory itself is importable during setup.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))

# Prefer setuptools; fall back to distutils on very old environments.
try:
    from setuptools import setup
    has_setuptools = True
except ImportError:
    from distutils.core import setup
    has_setuptools = False

# Python 3 installs require setuptools/distribute support.
if sys.version_info >= (3, ):
    if not has_setuptools:
        raise Exception('Python3 support in PyTransmit requires distribute.')

setup(
    name='PyTransmit',
    version='0.1',
    url='http://pytransmit.readthedocs.org/en/latest/',
    license='BSD',
    author='<NAME>',
    author_email='<EMAIL>',
    # BUG FIX: the tarball URL contained a stray space ("https: //...")
    # which made it an invalid, unusable URL.
    download_url="https://github.com/ajkumar25/PyTransmit/tarball/0.1",
    description='A flexible FTPClient library for python web development.',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    packages=[
        'pytransmit',
    ],
)
| StarcoderdataPython |
1621528 | <gh_stars>0
import setuptools
import os
# Long description comes verbatim from the README.
with open("README.md", "r", encoding='utf-8') as f:
    long_description = f.read()

dirname = os.path.dirname(__file__)

# Execute pysg/version.py to obtain __version__ without importing the
# package (which would pull in its runtime dependencies).
with open('pysg/version.py', 'r') as f:
    exec(f.read())

setuptools.setup(
    name="pysg",
    version=__version__,
    author="<NAME>",
    author_email="<EMAIL>",
    description="Simple and lightweight 3D render scene graph for python",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://gitlab.com/becheran/pysg",
    packages=setuptools.find_packages(),
    python_requires='>=3.6',
    license='MIT',
    install_requires=[
        'moderngl>=5.4.2,<6',
        'numpy>=1.15.4,<2',
        # BUG FIX: the upper bound was fused onto the lower one
        # ('pyrr>=0.10.0<1' is not a valid PEP 508 specifier) - a comma
        # is required between the two clauses.
        'pyrr>=0.10.0,<1',
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    zip_safe=False
)
| StarcoderdataPython |
3271823 | #%%
import numpy as np
#%%
def Custom_Convolve2D(image, kernel, padding=0, strides=1):
    """2D cross-correlation of `image` with `kernel`.

    The kernel is NOT flipped, so strictly speaking this is
    cross-correlation rather than convolution (matching most DL frameworks).

    Args:
        image: 2D input array.
        kernel: 2D filter array.
        padding: zero-padding added on every side.
        strides: step between consecutive filter placements.

    Returns:
        2D float array of shape ((H - kH + 2p)/s + 1, (W - kW + 2p)/s + 1).
    """
    k_rows, k_cols = kernel.shape[0], kernel.shape[1]
    img_rows, img_cols = image.shape[0], image.shape[1]

    # Output size from the standard convolution arithmetic formula.
    out_rows = int((img_rows - k_rows + 2 * padding) / strides + 1)
    out_cols = int((img_cols - k_cols + 2 * padding) / strides + 1)
    result = np.zeros((out_rows, out_cols))

    # Zero-pad equally on all four sides (no-op when padding == 0).
    if padding != 0:
        padded = np.zeros((img_rows + 2 * padding, img_cols + 2 * padding))
        padded[padding:-padding, padding:-padding] = image
    else:
        padded = image

    # Slide the kernel over every valid position, honouring the stride.
    for col in range(padded.shape[1]):
        if col > padded.shape[1] - k_cols:
            break  # kernel would hang over the right edge
        if col % strides != 0:
            continue
        for row in range(padded.shape[0]):
            if row > padded.shape[0] - k_rows:
                break  # kernel would hang over the bottom edge
            if row % strides != 0:
                continue
            window = padded[row:row + k_rows, col:col + k_cols]
            result[row // strides, col // strides] = (kernel * window).sum()
    return result
def Custom_MaxPlooling(image, pool_size=(2, 2), padding=0, strides=None):
    """Max-pool a 2D array.

    Args:
        image: 2D input array.
        pool_size: (rows, cols) of the pooling window.
        padding: zero-padding added on every side.
        strides: step between windows; defaults to the larger window side
            (non-overlapping pooling for square windows).

    Returns:
        2D array holding the maximum of each pooling window.
    """
    p_rows, p_cols = pool_size[0], pool_size[1]
    img_rows, img_cols = image.shape[0], image.shape[1]

    if strides is None:
        strides = max(p_cols, p_rows)

    # Output size from the standard pooling arithmetic formula.
    out_rows = int((img_rows - p_rows + 2 * padding) / strides + 1)
    out_cols = int((img_cols - p_cols + 2 * padding) / strides + 1)
    result = np.zeros((out_rows, out_cols))

    # Zero-pad equally on all four sides (no-op when padding == 0).
    if padding != 0:
        padded = np.zeros((img_rows + 2 * padding, img_cols + 2 * padding))
        padded[padding:-padding, padding:-padding] = image
    else:
        padded = image

    # Slide the window over every valid position, honouring the stride.
    for col in range(padded.shape[1]):
        if col > padded.shape[1] - p_cols:
            break  # window would hang over the right edge
        if col % strides != 0:
            continue
        for row in range(padded.shape[0]):
            if row > padded.shape[0] - p_rows:
                break  # window would hang over the bottom edge
            if row % strides != 0:
                continue
            window = padded[row:row + p_rows, col:col + p_cols]
            result[row // strides, col // strides] = window.max()
    return result
def Custom_Flatten(img):
    """Min-max normalise `img` into [0, 1] and flatten it to 1D.

    BUG FIX: a constant image previously produced 0/0 -> NaNs (with a
    runtime warning); it now maps to all zeros.
    """
    span = img.max() - img.min()
    if span == 0:
        return np.zeros(img.size)
    return ((img - img.min()) / span).flatten()
def Custom_Dense(data, units=10):
    """Fully-connected layer with random weights and sigmoid activation.

    Weights and biases are drawn uniformly from [0, 0.04) on every call to
    keep the exponent small (avoids overflow in exp).

    Args:
        data: 1D input vector.
        units: number of output neurons.

    Returns:
        (units, 1) array of sigmoid activations, each strictly in (0, 1).
    """
    n_in = data.shape[0]
    column = np.reshape(data, (n_in, 1))
    # Small random range [0, 0.04) to stop exponent explosion.
    weights = np.random.uniform(0, 0.04, [units, n_in])
    bias = np.random.uniform(0, 0.04, [units, 1])
    z = weights @ column + bias
    return 1 / (1 + np.exp(-z))
def Custom_softmax(data):
    """Softmax over all elements of `data` (not numerically stabilised)."""
    exp_vals = np.exp(data)
    return exp_vals / exp_vals.sum()
#%%
# Demo inputs: a 6x6 "image", a 3x3 vertical-edge-style kernel
# ([1, 0, -1] in every row), and a small row vector for the softmax demo.
im = np.array(
    [
        [4, 9, 2, 5, 8, 3],
        [5, 6, 2, 4, 0, 3],
        [2, 4, 5, 4, 5, 2],
        [5, 6, 5, 4, 7, 8],
        [5, 7, 7, 9, 2, 1],
        [5, 8, 5, 3, 8, 4],
    ]
)
kernel = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])
ss = np.array([[3.1, 0.3, 1.2]])
# %%
# Smoke-test the custom layers defined above (padding=1, stride=1).
print(Custom_Convolve2D(im, kernel, 1, 1))
print(Custom_softmax(ss))
# %%
| StarcoderdataPython |
1750724 | import os
import glob
import pandas as pd
import dask.dataframe as dd
import pathlib
from datetime import datetime, timedelta
def read_data(dir_name, usecols, dtype, date_range=None):
    """
    Return a dataframe with concatenated data.
    Set timestamp as index.
    Parameters:
        dir_name (str): directory name (a glob pattern; may be recursive)
        usecols (list-like): selected columns
        dtype (dict): data type for columns
        date_range (list of str): list with initial and final date 'yyyy/mm/dd'
    """
    # Collect and sort every file matching the glob pattern.
    filenames = [filename for filename in glob.iglob(dir_name, recursive=True)]
    filenames.sort()
    if date_range:
        # Narrow the file list to the requested date window.
        # NOTE(review): assumes each date string appears verbatim inside the
        # file names — confirm against the data layout.
        idx0 = filenames.index([x for x in filenames if date_range[0] in x][0])
        if idx0 != 0:
            # Include one extra file before the window so that rows belonging
            # to the first day are not lost at a file boundary.
            idx0 -= 1
        idx1 = filenames.index([x for x in filenames if date_range[-1] in x][-1]) + 1
        filenames = filenames[idx0:idx1]
    # Whitespace-separated files are read lazily with dask, then materialised.
    df = dd.read_csv(filenames,
                     sep=r'\s+',
                     usecols=usecols,
                     dtype=dtype)
    df = df.compute()
    # Build a single datetime index from the DATE and TIME columns.
    df['DATE_TIME'] = pd.to_datetime(df['DATE'] + ' ' + df['TIME'])
    df = df.set_index('DATE_TIME')
    df = df.drop(['DATE', 'TIME'], axis=1)
    if date_range:
        # Trim rows to the window; the end date is inclusive (whole day).
        return df.loc[(df.index >= date_range[0]) &
                      (df.index < datetime.strptime(date_range[-1], "%Y/%m/%d") + timedelta(days=1))]
    else:
        return df
def save_24h(df, path, file_id, level):
    """
    Save one CSV file per calendar day of `df`.
    Output layout: <path>/<year>/<month>/<file_id>-<YYYYMMDD>Z-DataLog_User_<level>.csv
    Parameters:
        df (pandas DataFrame): dataframe with a datetime index
        path (str): path to save output files
        file_id (str): analyzer serial number
        level (str): data processing level
    """
    for day_of_year in df.index.dayofyear.unique():
        daily = df[df.index.dayofyear == day_of_year]
        first_stamp = daily.index[0]
        out_dir = path + '/' + str(first_stamp.strftime('%Y')) + '/' + str(first_stamp.strftime('%m'))
        pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
        out_file = (out_dir + '/' + file_id + '-' +
                    first_stamp.strftime('%Y%m%d') +
                    'Z-DataLog_User_' + level + '.csv')
        daily.to_csv(out_file)
def resample_data(df, t, my_cols):
    """
    Returns a dataframe with resampled data [mean, std, count].
    Parameters:
        df (pandas DataFrame): dataframe with a datetime index
        t ('T', 'H', 'D') : minute, hour or day
        my_cols (list-like): selected columns
    """
    selected = df[my_cols]
    means = selected.resample(t).mean()
    stds = selected.resample(t).std()
    counts = selected.resample(t).count()
    # Suffix the std/count columns so all three statistics can coexist.
    return means.join(stds, rsuffix='_std').join(counts, rsuffix='_count')
def gantt_data(path, var, pos):
    """
    Returns a dataframe with data availability info.
    Parameters:
        path (str): file name of a CSV containing a DATE_TIME column
        var (str): selected variable
        pos (int): position in the graph (from bottom to top)
    """
    df = pd.read_csv(path).set_index('DATE_TIME')
    df.index = pd.to_datetime(df.index)
    # 'avail' holds `pos` where `var` has data and NaN where it is missing,
    # so plotting it draws an availability bar at height `pos`.
    df['avail'] = df[var].isnull().map({False: pos})
    return df
| StarcoderdataPython |
12848758 | import frappe
from datetime import *
@frappe.whitelist()
def add_leave_encashment(doc, method):
    """Hook: compute leave encashment for a Salary Slip.

    Counts days of the slip period covered by approved leave applications,
    prices them using the leave-type quarter percentages, and writes the
    total into a 'Basic' earnings row.  `doc` is the Salary Slip document;
    `method` is the (unused) hook event name.
    """
    from_date = (datetime.strptime(doc.start_date, "%Y-%m-%d")).date()
    to_date = (datetime.strptime(doc.end_date, "%Y-%m-%d")).date()
    salary_structure = frappe.db.sql(""" SELECT * FROM `tabSalary Structure Assignment` WHERE salary_structure=%s and employee=%s""", (doc.salary_structure, doc.employee), as_dict=1)
    amount = 0
    leave = 0
    # Count days in the period that fall inside an approved leave application.
    while (from_date <= to_date):
        leave_application = get_leave_application(from_date, doc.employee)
        if len(leave_application) > 0:
            leave += 1
        from_date = (from_date + timedelta(days=1))
    reg = 30 - leave  # regular (non-leave) days, assuming a 30-day month
    doc.total_leaves = leave
    remaining_leaves = int(frappe.db.sql(""" SELECT * FROM `tabEmployee` WHERE name=%s """, doc.employee, as_dict=1)[0].leave_balance)
    # Leave-balance thresholds per quarter; the payout percentage depends on
    # which band the remaining balance falls into.
    quarters = [{"quarter": "First Quarter", "days": 90}, {"quarter": "Second Quarter", "days": 60}, {"quarter": "Third Quarter", "days": 30}, {"quarter": "Fourth Quarter", "days": 0}]
    for i in quarters:
        if remaining_leaves > i.get("days") and leave > 0:
            leave_deduction = remaining_leaves - i.get("days")  # days available in this band, e.g. 90 - 60
            if leave_deduction >= leave:
                # All remaining leave days fit into this band's rate.
                leave_type = get_leave_type("Sick Leave", i.get("quarter"))
                amount += ((leave_type[0].percentage / 100) * (salary_structure[0].base / 30)) * leave
                remaining_leaves = remaining_leaves - leave
                leave = 0
            else:
                # Only part of the leave fits this band; price that part and
                # carry the rest into the next band.
                leave_type = get_leave_type("Sick Leave", i.get("quarter"))
                amount += ((leave_type[0].percentage / 100) * (salary_structure[0].base / 30)) * leave_deduction
                # NOTE(review): subtracting `leave` rather than
                # `leave_deduction` here looks suspicious — confirm intended.
                remaining_leaves = remaining_leaves - leave
                leave -= leave_deduction
    # Update the existing 'Basic' earnings row, or append one if missing.
    add = True
    for ii in doc.earnings:
        if ii.__dict__['salary_component'] == "Basic":
            add = False
            ii.__dict__['amount'] = amount + ((salary_structure[0].base / 30) * reg)
    if amount > 0 and add:
        doc.append("earnings", {
            "salary_component": "Basic",
            "amount": amount + ((salary_structure[0].base / 30) * reg)
        })
    doc.remaining_leaves = remaining_leaves - leave
def update_leave_employee(leave, employee):
    """Persist a new leave balance (stored as a string) for `employee`."""
    frappe.db.sql(""" UPDATE tabEmployee SET leave_balance=%s WHERE name=%s""", (str(leave), employee))
    frappe.db.commit()
def get_leave_application(from_date, employee):
    """Return approved Leave Applications whose period covers `from_date`
    for the given `employee`, as a list of dicts.

    SECURITY FIX: the query was previously built with str.format, allowing
    SQL injection through the arguments; values are now passed as bound
    parameters to frappe.db.sql.
    """
    query = """ SELECT * FROM `tabLeave Application`
                WHERE %s BETWEEN from_date and to_date
                  and employee=%s and status=%s """
    return frappe.db.sql(query, (str(from_date), employee, "Approved"), as_dict=1)
def get_leave_balances(name):
    """Return the Leave Balances child rows of `name`, highest idx first.

    SECURITY FIX: `name` is now passed as a bound parameter instead of
    being interpolated into the SQL string (injection risk).
    """
    query = """ SELECT * FROM `tabLeave Balances` WHERE parent=%s ORDER BY idx DESC """
    return frappe.db.sql(query, (name,), as_dict=1)
def get_leave_type(leave_type, quarter):
    """Return the quarter-percentage rows for `leave_type` in `quarter`."""
    return frappe.db.sql(""" SELECT * FROM `tabLeave Type Quarter Percentages` AS LTQP WHERE parent=%s and LTQP.type=%s""", (leave_type, quarter), as_dict=True)
def submit_salary_slip(doc, method):
    """Hook: on slip submit, persist the slip's remaining leave balance."""
    update_leave_employee(doc.remaining_leaves, doc.employee)
def cancel_salary_slip(doc, method):
    """Hook: on slip cancel, restore the leave days consumed by the slip."""
    remaining_leaves = int(frappe.db.sql(""" SELECT * FROM `tabEmployee` WHERE name=%s """, doc.employee, as_dict=1)[0].leave_balance)
    update_leave_employee(remaining_leaves + doc.total_leaves, doc.employee)
| StarcoderdataPython |
8158247 | from django.contrib import admin
from .models import Map_DB
# Register your models here.
# Expose Map_DB in the Django admin with the default ModelAdmin options.
admin.site.register(Map_DB) | StarcoderdataPython |
8076826 | import numpy
import subprocess
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
def get_dataset():
    """Load the malware/nomalware .npy feature files from /app/data.

    Every sample is padded/truncated to 1024 values.  The data is split
    90/10 into train/test; the train split is reshaped to (-1, 32, 32, 1)
    and its labels one-hot encoded into 25 classes.

    Returns:
        (train_X, test_X, train_Y, test_Y)
    """
    X = []
    Y = []
    labels = ['nomalware', 'malware']  # label index == class id
    # num_classes is how many class you in your data
    for index, name in enumerate(labels):
        # NOTE(review): listing files by shelling out to `ls` is fragile
        # (spaces are stripped from names below); os.listdir would be safer.
        data_files = subprocess.getoutput(
            f'ls /app/data/{name}/out/').split('\n')
        for data_file in data_files:
            data_file = data_file.replace(' ', '')
            data_path = f'/app/data/{name}/out/{data_file}'
            data = numpy.load(data_path)
            data.resize(1024)  # pad/trim every sample to a fixed length
            X.append(data)
            Y.append(index)
    train_X, test_X, train_Y, test_Y = train_test_split(X, Y, test_size=0.1)
    train_X = numpy.array(train_X)
    train_X = train_X.reshape(-1, 32, 32, 1)
    train_Y = numpy.array(train_Y)
    train_Y = to_categorical(train_Y, 25)
    # BUG FIX: previously returned `train_X, test_X, train_X, test_Y`,
    # duplicating the features and dropping the one-hot train labels.
    return train_X, test_X, train_Y, test_Y
| StarcoderdataPython |
3565645 | <reponame>NotJoeMartinez/python3-groupme-tools
from collections import defaultdict
import sys
from importlib import reload
reload(sys)
import json
import datetime
def main():
    """
    Usage: python posts-by-user.py [filename.json]

    Assumes filename.json is a JSON GroupMe transcript.  Prints per-user
    message counts and like statistics.
    """
    if len(sys.argv) < 2:
        print(main.__doc__)
        sys.exit(1)

    # Load the transcript: a JSON list of message dicts.
    with open(sys.argv[1]) as transcript_file:
        transcript = json.load(transcript_file)

    names = {}  # user_id -> display name (last seen wins)
    counts = defaultdict(lambda: {'messages': 0, 'likes_given': 0, 'likes_received': 0})
    totalLikes = 0
    for message in transcript:
        name = message[u'name']
        id = message[u'user_id']
        names[id] = name
        counts[id]['messages'] += 1
        counts[id]['likes_received'] += len(message['favorited_by'])
        for user_id in message['favorited_by']:
            counts[user_id]['likes_given'] += 1
            totalLikes += 1

    totalMessages = len(transcript)
    print('total message count: ' + str(totalMessages))

    output = {
        'messages': [],
        'likes_given': [],
        'likes_received': [],
    }
    for id, stats in counts.items():
        # BUG FIX: users who only liked (never posted) have no name entry and
        # zero messages; fall back to the raw id and avoid dividing by zero.
        name = names.get(id, id)
        count = stats['messages']
        like_given_count = stats['likes_given']
        like_received_count = stats['likes_received']
        output['messages'].append(u'{name}: messages: {count} ({msg_pct:.1f}%)'.format(
            name=name, count=count, msg_pct=count / float(totalMessages) * 100,
        ))
        output['likes_received'].append(u'{name}: likes received: {like_count} ({like_pct:.1f} per message)'.format(
            name=name, like_count=like_received_count,
            like_pct=like_received_count / float(count) if count else 0.0,
        ))
        # BUG FIX: guard against a transcript with no likes at all.
        output['likes_given'].append(u'{name}: likes given: {like_count} ({like_pct:.1f}%)'.format(
            name=name, like_count=like_given_count,
            like_pct=like_given_count / float(totalLikes) * 100 if totalLikes else 0.0,
        ))

    for category, values in output.items():
        print('\n')
        print(category)
        print('--------')
        print('\n'.join(values))
if __name__ == '__main__':
    main()
    sys.exit(0)  # explicit success exit code
| StarcoderdataPython |
1858627 | from __future__ import division, print_function
import numpy as np
class BCTParamError(RuntimeError):
    """Error raised when a routine receives invalid parameters."""
    pass
def teachers_round(x):
    '''
    Round with ties (.5) going away from zero, matlab-style, instead of
    Python's bankers rounding.  Kept for compatibility with matlab
    functions and ease of testing.
    '''
    frac = x % 1
    round_up = (x > 0 and frac >= 0.5) or (x < 0 and frac > 0.5)
    return int(np.ceil(x)) if round_up else int(np.floor(x))
def pick_four_unique_nodes_quickly(n):
    '''
    Fast equivalent of np.random.choice(n, 4, replace=False): draw a single
    integer in [0, n**4) and split it into four base-n digits, retrying on
    the (rare) collision.  Returns a tuple of four distinct node indices.
    '''
    while True:
        k = np.random.randint(n ** 4)
        candidates = (k % n, k // n % n, k // n ** 2 % n, k // n ** 3 % n)
        # Collisions are extremely unlikely except for very small n, where
        # the retry loop is computationally cheap anyway.
        if len(set(candidates)) == 4:
            return candidates
def cuberoot(x):
    '''
    Signed cube root: handles negative inputs correctly instead of
    uselessly crashing as in python or returning the wrong root as in
    matlab.  Works element-wise on arrays.
    '''
    magnitude = np.abs(x) ** (1 / 3)
    return np.sign(x) * magnitude
def dummyvar(cis, return_sparse=False):
    '''
    Efficient implementation of matlab's "dummyvar" command using sparse
    matrices.

    input: cis, NxM array-like containing M partitions of N nodes
        into <=N distinct communities
    input: return_sparse, if True return the scipy CSC matrix itself
        instead of a dense ndarray.  (BUG FIX: this flag was previously
        accepted but ignored.)

    output: dummyvar, an NxR matrix containing R column variables
        (indicator variables) with N entries, where R is the total number
        of communities summed across each of the M partitions, i.e.
        r = sum(len(unique(partitions[:, i])) for i in range(m))
    '''
    n = np.size(cis, axis=0)
    m = np.size(cis, axis=1)
    # BUG FIX: r was previously computed by passing a generator to np.sum
    # (deprecated and unsupported in modern numpy); use the builtin sum.
    r = sum(len(np.unique(cis[:, i])) for i in range(m))
    nnz = np.prod(cis.shape)

    # Sort each column so equal community labels become contiguous.
    ix = np.argsort(cis, axis=0)
    s_cis = cis[ix][:, range(m), range(m)]

    # True marks the first row of each run of equal labels per column;
    # those positions delimit the CSC column pointer array (indptr).
    mask = np.hstack((((True,),) * m, (s_cis[:-1, :] != s_cis[1:, :]).T))
    indptr, = np.where(mask.flat)
    indptr = np.append(indptr, nnz)

    import scipy.sparse as sp
    dv = sp.csc_matrix((np.repeat((1,), nnz), ix.T.flat, indptr), shape=(n, r))
    return dv if return_sparse else dv.toarray()
| StarcoderdataPython |
5162334 | <filename>python/example_requests.py
import datetime
from confidentcannabis import ConfidentCannabis

if __name__ == '__main__':
    # Example usage of the Confident Cannabis client against the sandbox
    # stage.  Fill in real credentials/identifiers/paths, then uncomment
    # the calls you want to exercise.
    api_key = 'PUT-YOUR-API-KEY-HERE'
    api_secret = 'PUT-YOUR-API-SECRET-HERE'
    cc = ConfidentCannabis(api_key, api_secret, api_stage='sandbox')
    # Identifiers and file paths used by the example calls below.
    client_id = 1
    order_id = '1610LAB0001'
    sample_id = '1610LAB0001.0001'
    upload_file_path = 'path/to/a/file'
    sample_coa_path = 'path/to/a/pdf'
    sample_image_path = 'path/to/an/image'
    set_sample_cover = True
    # Example payload for submit_test_results: per-category metadata
    # ('info_fields') plus individual compound measurements ('compounds').
    test_results = {
        'categories': {
            'cannabinoids': {
                'info_fields': {
                    'input_units': '%',
                    'report_units': '%',
                    'footnote': 'Only the highest quality methods',
                    'signatory_name': '<NAME>',
                    'signatory_title': 'Lab Director',
                    'unit_description': 'Flower',
                    'date_tested': str(datetime.datetime.now())
                },
                'compounds': [
                    {
                        'name': 'd9_thc',
                        'value': '0.19',
                        'loq': '0.01'
                    },
                    {
                        'name': 'thca',
                        'value': '0.03',
                        'loq': '0.01'
                    },
                    {
                        'name': 'cbd',
                        'value': '0.02',
                        'loq': '0.01'
                    }
                ]
            },
            'pesticides': {
                'info_fields': {
                    'input_units': 'ppm',
                    'report_units': 'ppb',
                    'footnote': 'Checked for pesticides',
                    'signatory_name': '<NAME>',
                    'signatory_title': 'Lab Director',
                    'date_tested': str(datetime.datetime.now())
                },
                'compounds': [
                    {
                        'name': 'avermectin',
                        'value': '20',
                        'loq': '10',
                        'limit': '50'
                    },
                    {
                        'name': 'ddvp',
                        'value': '20',
                        'loq': '10',
                        'limit': '50'
                    },
                    {
                        'name': 'gamma_bhc',
                        'value': '20',
                        'loq': '10',
                        'limit': '50'
                    }
                ]
            }
        }
    }

    # ------
    # CLIENTS
    # ------
    # print "List Clients"
    # print cc.get_clients()
    # print "Client Details"
    # print cc.get_client(client_id)

    # ------
    # ORDERS
    # ------
    # print "List Orders"
    # orders = cc.get_orders()
    # print orders
    # print "Paged Orders"
    # paged_orders = cc.get_orders(start=len(orders)-2, limit=1)
    # print paged_orders
    # assert(len(paged_orders), 1)
    # print "Only In Progress Orders"
    # in_progress_orders = cc.get_orders(status_id=2)
    # print in_progress_orders
    # print "Order Details"
    # print cc.get_order(order_id)
    # print "Upload Order Document"
    # cc.upload_order_document(order_id, upload_file_path)

    # ------
    # SAMPLES
    # ------
    # print "List Samples"
    # samples = cc.get_samples()
    # print samples
    # print "Paged Samples"
    # paged_samples = cc.get_samples(start=len(samples)-2, limit=1)
    # print paged_samples
    # assert(len(paged_samples), 1)
    # print "Only In Progress Samples"
    # in_progress_samples = cc.get_samples(order_status_id=2)
    # print in_progress_samples
    # print "Sample Details"
    # print cc.get_sample(sample_id)
    # print "Submit Sample Test Results"
    # cc.submit_test_results(sample_id, test_results)
    # print "Upload Sample COA"
    # cc.upload_sample_coa(
    #     sample_id,
    #     sample_coa_path,
    # )
    # print "Upload Sample Image"
    # cc.upload_sample_image(
    #     sample_id,
    #     sample_image_path,
    #     set_as_cover=set_sample_cover
    # )

    # ------
    # INFO
    # ------
    # print "COMPOUNDS"
    # print cc.get_compounds()
    # print "ORDER STATUSES"
    # print cc.get_order_statuses()
    # print "SAMPLE CATEGORIES"
    # print cc.get_sample_categories()
    # print "SAMPLE TYPES"
    # print cc.get_sample_types()
    # print "TEST TYPES"
    # print cc.get_test_types()
| StarcoderdataPython |
1892318 | #!/usr/bin/env python3
from __future__ import print_function
import os
import sys
import math
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from .util import custom_qt_items as cqt
from .util import file_io
from .util.mygraphicsview import MyGraphicsView
sys.path.append('..')
import qtutil
import pickle
import numpy as np
from scipy import stats
import matplotlib
import uuid
import csv
from pyqtgraph.Qt import QtGui
from .util.plugin import WidgetDefault
from .util.plugin import PluginDefault
from .util.custom_qt_items import RoiList
import functools
import itertools
import matplotlib.pyplot as plt
import math
from .util.custom_pyqtgraph_items import GradientLegend
from .util.visualization_window import DockWindow
from pyqtgraph.dockarea import *
from .util import project_functions as pfs
def combined_mean(ns_and_means):
    ''' ns_and_means = (n1, mean1), (n2, mean2) ...
    Calculates the combined (pooled) mean, weighting each group's mean by
    its sample size. '''
    weighted_sum = sum(count * mean for count, mean in ns_and_means)
    total_count = sum(count for count, _ in ns_and_means)
    return weighted_sum / total_count
def combined_st_dev(ns_and_means_and_stdevs):
    '''ns_and_means_and_stdevs = (n1, mean1, stdev1), (n2, mean2, stdev2) ...
    Calculates the combined (pooled) sample standard deviation.
    Formula: https://stats.stackexchange.com/questions/55999/is-it-possible-to-find-the-combined-standard-deviation'''
    # Grand mean over all groups (combined_mean inlined so this function is
    # self-contained).
    total_count = sum(count for count, _, _ in ns_and_means_and_stdevs)
    grand_mean = sum(count * mean for count, mean, _ in ns_and_means_and_stdevs) / total_count
    # Each group contributes its within-group variance plus the squared
    # offset of its mean from the grand mean.
    pooled = sum((count - 1) * stdev ** 2 + count * (mean - grand_mean) ** 2
                 for count, mean, stdev in ns_and_means_and_stdevs)
    return math.sqrt(pooled / (total_count - 1))
def calc_avg(roi, frames, image):
    """Return the spatial mean time-series of `frames` inside `roi`.

    The ROI mask is taken over the two spatial axes; the result holds one
    value per frame (sum over the masked pixels divided by the mask size).
    """
    mask = roi.getROIMask(frames, image, axes=(1, 2))
    pixels_in_roi = np.count_nonzero(mask)
    masked_frames = frames * mask[np.newaxis, :, :]
    per_frame_sum = masked_frames.sum(axis=(1, 2))
    return per_frame_sum / pixels_in_roi
def calc_connectivity(video_path, image, rois):
    """Return the ROI-by-ROI Pearson correlation matrix for a video file."""
    frames = file_io.load_file(video_path)
    traces = [calc_avg(roi, frames, image) for roi in rois]
    # Pearson r is symmetric, so the orientation of rows/columns is moot.
    return [[stats.pearsonr(x_trace, y_trace)[0] for x_trace in traces]
            for y_trace in traces]
class RoiListModified(RoiList):
    """RoiList that mirrors the current ROI selection (in selection order)
    into the parent widget's `selected_rois_list`."""

    def selected_roi_changed(self, selection):
        # Let the base class update its own view first.
        super().selected_roi_changed(selection)
        # Collect display names of all currently selected ROIs...
        rois_selected = [str(self.selectionModel().selectedIndexes()[x].data(Qt.DisplayRole))
                         for x in range(len(self.selectionModel().selectedIndexes()))]
        # ...and mirror them into the companion "Selected ROIs" list widget.
        self.widget.selected_rois_list.clear()
        self.widget.selected_rois_list.addItems([r for r in rois_selected])
class DockWindowMat(DockWindow):
    """Dock-area window hosting a connectivity matrix plus a colour-gradient
    legend; its layout and data can be pickled/unpickled via save/load."""

    def __init__(self, win, parent, state=None, area=None, title=None):
        super(DockWindowMat, self).__init__(None, area, title, parent)
        self.parent = parent
        self.state = state
        self.connectivity_dialog = win
        # Remember how many placeholder docks exist so they can be closed below.
        docks = range(len([i for i in self.area.docks.keys()]))
        d = Dock("Connectivity Matrix", size=(500, 200), closable=True)
        d.addWidget(win)
        self.area.addDock(d, 'above', self.area.docks['d1'])
        # Build a gradient legend matching the parent's colormap settings.
        min_label = parent.min_sb.value()
        max_label = parent.max_sb.value()
        cm_type = parent.cm_comboBox.currentText()
        view = MyGraphicsView(parent.project, parent=None, image_view_on=True)
        l = GradientLegend(min_label, max_label, cm_type)
        l.setParentItem(view.vb)
        d = Dock("Gradient Legend", size=(500, 200), closable=True)
        d.addWidget(view)
        self.area.addDock(d, 'above', self.area.docks['d2'])
        # close placeholder docks
        for dock in docks:
            self.area.docks['d'+str(dock+1)].close()
        if state:
            self.area.restoreState(self.state)

    def setup_gradient_legend(self, l):
        # Intentionally a no-op: the legend is created in __init__ instead.
        pass

    def save_state(self):
        """Extend the base save: re-pickle the file to also include the
        matrix data, ROI names and selected image stacks."""
        save_loc = super().save_state()
        with open(save_loc, 'rb') as input:
            state = pickle.load(input)
        try:
            with open(save_loc, 'wb') as output:
                pickle.dump([self.connectivity_dialog.model._data, self.connectivity_dialog.model.roinames,
                             self.connectivity_dialog.selected_image_stacks,
                             state], output, -1)
        except:
            qtutil.critical(save_loc + " failed to save.")
            return

    def load_state(self):
        """Let the user pick saved matrix pickles and reopen each as a new
        DockWindowMat registered with the parent."""
        filenames = QFileDialog.getOpenFileNames(
            self, 'Load matrix', QSettings().value('last_vis_path'),
            'visualization window pickle (*.pkl)')[0]
        if not filenames:
            return
        QSettings().setValue('last_vis_path', os.path.dirname(filenames[0]))
        for filename in filenames:
            try:
                # SECURITY NOTE: pickle.load executes arbitrary code from the
                # chosen file; only open trusted .pkl files.
                with open(filename, 'rb') as input:
                    [mat_data, roinames, selected_image_stacks, state] = pickle.load(input)
                cm_type = self.parent.cm_comboBox.currentText()
                win = ConnectivityDialog(self.parent, roinames, cm_type, mat_data)
                new_dw = DockWindowMat(win, parent=self.parent, state=state, title=os.path.basename(filename))
                self.parent.open_dialogs.append(new_dw)
                new_dw.show()
                new_dw.saving_state[str].connect(functools.partial(pfs.save_dock_window_to_project, self.parent,
                                                                   self.parent.Defaults.window_type))
            except:
                qtutil.critical(filename + " failed to open. Aborting.")
                return

    def closeEvent(self, event):
        # Deregister from the parent's open-dialog bookkeeping on close.
        super().closeEvent(event)
        self.parent.open_dialogs.remove(self)
class Widget(QWidget, WidgetDefault):
    """Correlation-matrix plugin widget.

    Lets the user pick ROIs and image stacks, compute an averaged Pearson
    correlation matrix (with SD or SEM) across the selected stacks, average or
    subtract previously saved matrices, and export results to pickle/csv.
    """
    class Labels(WidgetDefault.Labels):
        # Keys under which UI state is persisted in the project parameters.
        colormap_index_label = "Choose Colormap:"
        sb_min_label = "Min colormap range"
        sb_max_label = "Max colormap range"

    class Defaults(WidgetDefault.Defaults):
        colormap_index_default = 1
        roi_list_types_displayed = ['auto_roi', 'roi']
        window_type = 'connectivity_matrix'
        sb_min_default = -1.00
        sb_max_default = 1.00

    def __init__(self, project, plugin_position, parent=None):
        super(Widget, self).__init__(parent=parent)
        if not project or not isinstance(plugin_position, int):
            return
        self.project = project
        self.view = MyGraphicsView(self.project)
        self.video_list = QListView()
        self.roi_list = QListView()
        self.selected_rois_list = QListWidget()
        self.roi_list.setModel(RoiModel())
        # todo: there is a mismatch in type between RoiModel and RoiItemModel in custom_qt_items. As such it was easier
        # to abandon the convention of not initializing UI paramaters in init to get it funcitonal. Nonetheless, these
        # next few lines really should be in a class somewhere for the roi_list item
        # for f in project.files:
        #     if f['type'] == self.Defaults.roi_list_types_displayed:
        #         item = QStandardItem(f['name'])
        #         item.setData(f['path'], Qt.UserRole)
        #         self.roi_list.model().appendRow(item)
        self.avg_mat_pb = QPushButton("Average Matrices")
        self.sub_mat_pb = QPushButton("Subtract Matrices")
        self.cm_comboBox = QtGui.QComboBox(self)
        self.min_sb = QDoubleSpinBox()
        self.max_sb = QDoubleSpinBox()
        self.save_pb = QPushButton("Generate csv files of all open matrices")
        self.load_pb = QPushButton("&Load project matrix windows")
        self.mask_checkbox = QCheckBox("Mask Symmetry")
        self.sem_checkbox = QCheckBox("Use SEM instead of SD")
        self.cm_pb = QPushButton('Correlation &Matrix')
        # NOTE: replaces the plain QListView assigned above (see todo).
        self.roi_list = RoiListModified(self, self.Defaults.roi_list_types_displayed, RoiModel())
        WidgetDefault.__init__(self, project=project, plugin_position=plugin_position)

    def setup_ui(self):
        """Lay out the matrix-math buttons, ROI lists, colormap and range controls."""
        super().setup_ui()
        self.vbox.addWidget(qtutil.separator())
        self.vbox.addWidget(QLabel("Matrix Math Functions"))
        hbox = QHBoxLayout()
        hbox.addWidget(self.avg_mat_pb)
        hbox.addWidget(self.sub_mat_pb)
        self.vbox.addLayout(hbox)
        self.vbox.addWidget(qtutil.separator())
        self.vbox.addWidget(cqt.InfoWidget('Note that rois can be dragged and dropped in the list but that the order '
                                           'in which they are *selected* determines how the matrix is ordered. The '
                                           'first selected ROI is placed at the top of the matrix. '
                                           'Dragging and dropping is for convenience so you can organize your desired '
                                           'order and then shift select them from top to bottom to quickly have your '
                                           'desired matrix ordering.'))
        hbox = QHBoxLayout()
        hbox.addWidget(QLabel('Select ROIs:'))
        hbox.addWidget(QLabel('Selected ROIs:'))
        self.vbox.addLayout(hbox)
        # Allow multi-select plus internal drag & drop reordering of the ROI list.
        self.roi_list.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.roi_list.setAcceptDrops(True)
        self.roi_list.setDragEnabled(True)
        self.roi_list.setDropIndicatorShown(True)
        self.roi_list.setDragDropMode(QAbstractItemView.InternalMove)
        self.roi_list.setDefaultDropAction(Qt.MoveAction)
        self.roi_list.setDragDropOverwriteMode(False)
        hbox = QHBoxLayout()
        hbox.addWidget(self.roi_list)
        hbox.addWidget(self.selected_rois_list)
        self.vbox.addLayout(hbox)
        self.vbox.addWidget(QLabel(self.Labels.colormap_index_label))
        # todo: colormap list should be dealt with in a separate script
        self.cm_comboBox.addItem("jet")
        self.cm_comboBox.addItem("viridis")
        self.cm_comboBox.addItem("inferno")
        self.cm_comboBox.addItem("plasma")
        self.cm_comboBox.addItem("magma")
        self.cm_comboBox.addItem("coolwarm")
        self.cm_comboBox.addItem("PRGn")
        self.cm_comboBox.addItem("seismic")
        self.vbox.addWidget(self.cm_comboBox)
        hbox = QHBoxLayout()
        hbox.addWidget(QLabel(self.Labels.sb_min_label))
        hbox.addWidget(QLabel(self.Labels.sb_max_label))
        self.vbox.addLayout(hbox)
        hbox = QHBoxLayout()
        hbox.addWidget(self.min_sb)
        hbox.addWidget(self.max_sb)

        # Keep the two spinboxes mutually consistent: the min can never exceed
        # the current max and vice versa.
        def min_handler(max_of_min):
            self.min_sb.setMaximum(max_of_min)

        def max_handler(min_of_max):
            self.max_sb.setMinimum(min_of_max)
        self.min_sb.valueChanged[float].connect(max_handler)
        self.max_sb.valueChanged[float].connect(min_handler)
        self.min_sb.setMinimum(-1.0)
        self.max_sb.setMaximum(1.0)
        self.min_sb.setSingleStep(0.1)
        self.max_sb.setSingleStep(0.1)
        self.vbox.addLayout(hbox)
        self.vbox.addWidget(self.save_pb)
        # self.vbox.addWidget(self.load_pb)
        self.mask_checkbox.setChecked(True)
        self.sem_checkbox.setChecked(False)
        self.vbox.addWidget(self.mask_checkbox)
        self.vbox.addWidget(self.sem_checkbox)
        self.vbox.addWidget(self.cm_pb)

    def setup_signals(self):
        """Connect the action buttons to their handlers."""
        super().setup_signals()
        self.cm_pb.clicked.connect(self.connectivity_triggered)
        self.save_pb.clicked.connect(self.save_triggered)
        self.load_pb.clicked.connect(self.load_triggered)
        self.sub_mat_pb.clicked.connect(self.sub_mat_triggered)
        self.avg_mat_pb.clicked.connect(self.avg_mat_triggered)

    def setup_params(self, reset=False):
        """Restore (or reset to defaults) the persisted colormap and range values."""
        super().setup_params(reset)
        self.roi_list.setup_params()
        if len(self.params) == 1 or reset:
            self.update_plugin_params(self.Labels.colormap_index_label, self.Defaults.colormap_index_default)
            self.update_plugin_params(self.Labels.sb_min_label, self.Defaults.sb_min_default)
            self.update_plugin_params(self.Labels.sb_max_label, self.Defaults.sb_max_default)
        self.cm_comboBox.setCurrentIndex(self.params[self.Labels.colormap_index_label])
        self.min_sb.setValue(self.params[self.Labels.sb_min_label])
        self.max_sb.setValue(self.params[self.Labels.sb_max_label])

    def setup_param_signals(self):
        """Persist UI changes back into the project parameters."""
        super().setup_param_signals()
        self.roi_list.setup_param_signals()
        self.cm_comboBox.currentIndexChanged[int].connect(functools.partial(self.update_plugin_params,
                                                                            self.Labels.colormap_index_label))
        self.min_sb.valueChanged[float].connect(functools.partial(self.update_plugin_params,
                                                                  self.Labels.sb_min_label))
        self.max_sb.valueChanged[float].connect(functools.partial(self.update_plugin_params,
                                                                  self.Labels.sb_max_label))

    def sub_mat_triggered(self):
        """Subtract one saved matrix (subtrahend) from another (minuend) and show the result."""
        qtutil.info("Please select the matrix that will act as the minuend")
        minuend_path = QFileDialog.getOpenFileName(
            self, 'Load matrix', QSettings().value('last_vis_path'),
            'visualization window pickle (*.pkl)')[0]
        if not minuend_path:
            return
        qtutil.info("Please select the matrix that will act as the subtrahend. This second matrix must have the "
                    "same dimensions and ROIs in the same locations as the minuend matrix")
        # BUGFIX: getOpenFileName returns (path, filter); take the path like the
        # minuend call above, otherwise os.path.dirname below gets a tuple.
        subtrahand_path = QFileDialog.getOpenFileName(
            self, 'Load matrix', QSettings().value('last_vis_path'),
            'visualization window pickle (*.pkl)')[0]
        if not subtrahand_path:
            return
        QSettings().setValue('last_vis_path', os.path.dirname(subtrahand_path))
        try:
            with open(minuend_path, 'rb') as f:
                [minuend_mat_data, minuend_roinames, minuend_selected_image_stacks, minuend_state] = pickle.load(f)
        except Exception:
            qtutil.critical(minuend_path + " failed to open. Aborting. Make sure this file is a MATRIX pkl file")
            # BUGFIX: actually abort — continuing would hit undefined names.
            return
        try:
            with open(subtrahand_path, 'rb') as f:
                [subtrahand_mat_data, subtrahand_roinames, subtrahand_selected_image_stacks,
                 subtrahand_state] = pickle.load(f)
        except Exception:
            qtutil.critical(subtrahand_path + " failed to open. Aborting. Make sure this file is a MATRIX pkl file")
            return
        if subtrahand_roinames != minuend_roinames:
            qtutil.critical('roi names do not match. The same roi names and same order is required. Aborting.')
            return
        minuend_number = len(minuend_selected_image_stacks)
        subtrahand_number = len(subtrahand_selected_image_stacks)
        altogether = minuend_number + subtrahand_number
        # minus one = Bessel correction.
        # See https://stats.stackexchange.com/questions/55999/is-it-possible-to-find-the-combined-standard-deviation
        # Each cell collects [n, mean, stdev] triples for both matrices so the
        # combined standard deviation can be computed per cell.
        ns_and_means_and_stdevs = [[[] for j in range(len(subtrahand_mat_data))]
                                   for i in range(len(subtrahand_mat_data[0]))]
        for row_no, row in enumerate(ns_and_means_and_stdevs):
            for col_no, col in enumerate(ns_and_means_and_stdevs[0]):
                ns_and_means_and_stdevs[row_no][col_no] = [[minuend_number] + list(minuend_mat_data[row_no][col_no]),
                                                           [subtrahand_number] +
                                                           list(subtrahand_mat_data[row_no][col_no])]
        sub_mat = [[[] for j in range(len(subtrahand_mat_data[0]))] for i in range(len(subtrahand_mat_data))]
        for row_no, row in enumerate(sub_mat):
            for col_no, col in enumerate(sub_mat[0]):
                sub_mat[row_no][col_no] = (minuend_mat_data[row_no][col_no][0] - subtrahand_mat_data[row_no][col_no][0],
                                           combined_st_dev(ns_and_means_and_stdevs[row_no][col_no]))
        cm_type = self.cm_comboBox.currentText()
        win = ConnectivityDialog(self, minuend_roinames, cm_type, sub_mat)
        new_dw = DockWindowMat(win, parent=self, state=minuend_state, title=os.path.basename(minuend_path) +
                               ' - ' + os.path.basename(subtrahand_path))
        self.open_dialogs.append(new_dw)
        new_dw.show()
        new_dw.saving_state[str].connect(functools.partial(pfs.save_dock_window_to_project, self,
                                                           self.Defaults.window_type))

    def avg_mat_triggered(self):
        """Average several saved matrices (combined mean/SD per cell) and show the result."""
        qtutil.info("Please select all the matrices to be averaged. Matrices must have the same ROIs in the same "
                    "locations. You might find it easier to move all the matrix pkl files to the same folder before "
                    "performing this action.")
        # BUGFIX: getOpenFileNames returns (paths, filter); iterate the path list.
        paths = QFileDialog.getOpenFileNames(
            self, 'Load matrices', QSettings().value('last_vis_path'),
            'visualization window pickle (*.pkl)')[0]
        if not paths:
            return
        dat = []
        roinames_previous = []
        path_previous = ''
        for path in paths:
            try:
                with open(path, 'rb') as f:
                    [mat_data, roinames, selected_image_stacks, state] = \
                        pickle.load(f)
                dat = dat + [[mat_data, roinames, selected_image_stacks, state]]
                if roinames != roinames_previous and roinames_previous:
                    qtutil.critical(path + 'does not have the same ROI names as ' + path_previous)
                    return
                roinames_previous = roinames
                path_previous = path
            except Exception:
                qtutil.critical(path + " failed to open. Aborting. Make sure this file is a MATRIX pkl file")
                # BUGFIX: abort as the message says instead of averaging a
                # partial set with possibly-undefined names below.
                return
        # [n, matrix] per input file; n = number of image stacks it averaged.
        mat_datas = [[len(d[2]), d[0]] for d in dat]
        ns_and_means_and_stdevs = [[[] for j in range(len(mat_datas[0][1][0]))] for i in range(len(mat_datas[0][1]))]
        for row_no, row in enumerate(mat_datas[0][1]):
            for col_no, col in enumerate(mat_datas[0][1][0]):
                for mat_data in mat_datas:
                    ns_and_means_and_stdevs[row_no][col_no] = ns_and_means_and_stdevs[row_no][col_no] + \
                        [[mat_data[0]] + list(mat_data[1][row_no][col_no])]
        result = [[[] for j in range(len(mat_datas[0][1][0]))] for i in range(len(mat_datas[0][1]))]
        for row_no, row in enumerate(result):
            for col_no, col in enumerate(result[0]):
                result[row_no][col_no] = (combined_mean([x[:-1] for x in ns_and_means_and_stdevs[row_no][col_no]]),
                                          combined_st_dev(ns_and_means_and_stdevs[row_no][col_no]))
        cm_type = self.cm_comboBox.currentText()
        win = ConnectivityDialog(self, roinames, cm_type, loaded_data=result)
        new_dw = DockWindowMat(win, parent=self, state=state, title=os.path.basename(paths[0]) + ' ' + str(len(paths)) +
                               ' other matrices averaged')
        self.open_dialogs.append(new_dw)
        new_dw.show()
        new_dw.saving_state[str].connect(functools.partial(pfs.save_dock_window_to_project, self,
                                                           self.Defaults.window_type))

    def connectivity_triggered(self):
        """Compute a correlation matrix for the selected ROIs/videos and open it in a dock."""
        cm_type = self.cm_comboBox.currentText()
        progress = QProgressDialog('Generating Correlation Matrix...', 'Abort', 0, 100, self)
        progress.setAutoClose(True)
        progress.setMinimumDuration(0)

        def callback(x):
            # x is a 0..1 fraction; the dialog expects 0..100.
            progress.setValue(x * 100)
            QApplication.processEvents()
        indexes = self.roi_list.selectionModel().selectedIndexes()
        roinames = [index.data(Qt.DisplayRole) for index in indexes]
        rois = [self.view.vb.getRoi(roiname) for roiname in roinames]
        if not self.view.vb.img:
            qtutil.critical('Select video.')
        elif not rois:
            qtutil.critical('Select Roi(s).')
        else:
            win = ConnectivityDialog(self, roinames, cm_type, progress_callback=callback)
            win.resize(900, 900)
            callback(1)
            # self.open_dialogs.append(win)
            # todo: add matrices to docks
            dock_window = DockWindowMat(win, parent=self)
            self.open_dialogs.append(dock_window)
            dock_window.show()
            self.save_open_dialogs_to_csv()

    def filedialog(self, name, filters):
        """Prompt for a save path under the project directory; return it (with
        the selected extension appended) or None if cancelled."""
        path = self.project.path
        dialog = QFileDialog(self)
        dialog.setWindowTitle('Export to')
        dialog.setDirectory(str(path))
        dialog.setFileMode(QFileDialog.AnyFile)
        dialog.setOption(QFileDialog.DontUseNativeDialog)
        dialog.selectFile(name)
        dialog.setFilter(';;'.join(filters.values()))
        dialog.setAcceptMode(QFileDialog.AcceptSave)
        if not dialog.exec_():
            return None
        filename = str(dialog.selectedFiles()[0])
        QSettings().setValue('export_path', os.path.dirname(filename))
        filter_ = str(dialog.selectedNameFilter())
        # Map the chosen display filter back to its extension key.
        ext = [f for f in filters if filters[f] == filter_][0]
        if not filename.endswith(ext):
            filename = filename + ext
        return filename

    def save_triggered(self):
        """Export all open matrix windows to csv in the project directory.

        NOTE: everything after the first `return` below is intentionally dead
        for now (see the todo) — it is the older pickle-per-window save flow.
        """
        if not self.open_dialogs:
            qtutil.info('No correlation matrix windows are open. ')
            return
        self.save_open_dialogs_to_csv()
        qtutil.info('csvs saved to project directory')
        return
        # todo: improve general user experience (saving,loading etc). Look at below
        continue_msg = "All Correlation Matrices will be closed after saving, *including* ones you have not saved. \n" \
                       "\n" \
                       "Continue?"
        reply = QMessageBox.question(self, 'Save All',
                                     continue_msg, QMessageBox.Yes | QMessageBox.No)
        if reply == QMessageBox.No:
            return
        qtutil.info(
            'There are ' + str(len(self.open_dialogs)) + ' correlation matrices in memory. We will now choose a path '
                                                         'to save each one to. Simply don\'t save ones you have '
                                                         'purposefully closed. Though, good news, you now have '
                                                         'one last chance to save and recover any matrices you '
                                                         'accidentally closed')
        for dialog in self.open_dialogs:
            win_title = dialog.windowTitle()
            filters = {
                '.pkl': 'Python pickle file (*.pkl)'
            }
            default = win_title
            pickle_path = self.filedialog(default, filters)
            if pickle_path:
                self.project.files.append({
                    'path': pickle_path,
                    'type': self.Defaults.window_type,
                    'name': os.path.basename(pickle_path)
                })
                self.project.save()
                # for row in dialog.model._data:
                #     for cell in row:
                #         if math.isnan(cell[0]) or math.isnan(cell[0]):
                #             qtutil.warning("File might not save properly since it has nan values. Make sure all your "
                #                            "ROIs are inside your mask.")
                #             break
                # Now save the actual file
                title = os.path.basename(pickle_path)
                matrix_output_data = (title, dialog.connectivity_dialog.model.roinames,
                                      dialog.connectivity_dialog.model._data)
                try:
                    with open(pickle_path, 'wb') as output:
                        pickle.dump(matrix_output_data, output, -1)
                except Exception:
                    qtutil.critical(
                        pickle_path + " could not be saved. Ensure MBE has write access to this location and "
                                      "that another program isn't using this file.")
        qtutil.info("All files have been saved")
        csv_msg = "Save csv files of all open Correlation Matrix windows as well?"
        reply = QMessageBox.question(self, 'Save All',
                                     csv_msg, QMessageBox.Yes | QMessageBox.No)
        if reply == QMessageBox.Yes:
            self.save_open_dialogs_to_csv()
        for dialog in self.open_dialogs:
            dialog.close()
        self.open_dialogs = []

    def load_triggered(self):
        """Re-open every matrix window that was saved into this project."""
        paths = [p['path'] for p in self.project.files if p['type'] == self.Defaults.window_type]
        if not paths:
            qtutil.info("Your project has no correlation matrices. Make and save some!")
            return
        for pickle_path in paths:
            try:
                with open(pickle_path, 'rb') as f:
                    (title, roinames, dat) = pickle.load(f)
            except Exception:
                del_msg = pickle_path + " could not be loaded. If this file exists, " \
                                        "ensure MBE has read access to this " \
                                        "location and that another program isn't using this file " \
                                        "" \
                                        "\n \nOtherwise, would you like to detatch this file from your project? "
                reply = QMessageBox.question(self, 'File Load Error',
                                             del_msg, QMessageBox.Yes | QMessageBox.No)
                if reply == QMessageBox.Yes:
                    norm_path = os.path.normpath(pickle_path)
                    self.project.files[:] = [f for f in self.project.files if
                                             os.path.normpath(f['path']) != norm_path]
                    self.project.save()
                    load_msg = pickle_path + " detatched from your project." \
                                             "" \
                                             "\n \n Would you like to continue loading the " \
                                             "remaining project matrices?"
                    reply = QMessageBox.question(self, 'Continue?',
                                                 load_msg, QMessageBox.Yes | QMessageBox.No)
                    if reply == QMessageBox.No:
                        return
                continue
            main_window = ConnectivityDialog(self, roinames, self.cm_comboBox.currentText(), dat)
            main_window.setWindowTitle(title)
            main_window.resize(900, 900)
            main_window.show()
            self.open_dialogs.append(main_window)

    def save_open_dialogs_to_csv(self):
        """Write one mean-value csv and one stdev csv per open matrix window."""
        if not self.open_dialogs:
            qtutil.info('No correlation matrix windows are open. ')
            return
        for i, dialog in enumerate(self.open_dialogs):
            rois_names = [dialog.connectivity_dialog.model.rois[x].name for x in range(
                len(dialog.connectivity_dialog.model.rois))]
            file_name_avg = os.path.splitext(os.path.basename(dialog.windowTitle()))[0] + \
                '_averaged_correlation_matrix.csv'
            file_name_stdev = os.path.splitext(os.path.basename(dialog.windowTitle()))[0] + \
                '_stdev_correlation_matrix.csv'
            with open(os.path.join(self.project.path, file_name_avg), 'w', newline='') as csvfile:
                writer = csv.writer(csvfile, delimiter=',')
                writer.writerow(rois_names)
                for row_ind in range(len(dialog.connectivity_dialog.model._data)):
                    row = dialog.connectivity_dialog.model._data[row_ind]
                    # Element 0 of each cell tuple is the mean correlation.
                    row = [row[x][0] for x in range(len(row))]
                    writer.writerow(row)
                writer.writerow(['Selected videos:'] + self.selected_videos)
            # Do the standard deviation
            with open(os.path.join(self.project.path, file_name_stdev), 'w', newline='') as csvfile:
                writer = csv.writer(csvfile, delimiter=',')
                writer.writerow(rois_names)
                for row_ind in range(len(dialog.connectivity_dialog.model._data)):
                    row = dialog.connectivity_dialog.model._data[row_ind]
                    # Element 1 of each cell tuple is the standard deviation.
                    row = [row[x][1] for x in range(len(row))]
                    writer.writerow(row)
                writer.writerow(['Selected videos:'] + self.selected_videos)

    def setup_whats_this(self):
        """Attach 'What's This?' help text to the main controls."""
        super().setup_whats_this()
        self.roi_list.setWhatsThis("Choose ROIs where the average value for each frame across frames is used for each "
                                   "selected ROI. This set of values is correlated with the average of all other ROIs "
                                   "to create the correlation matrix. ")
        self.cm_comboBox.setWhatsThis("Choose the colormap used to represent your matrices. Note that we "
                                      "discourage the use of jet. For a discussion on this please see "
                                      "'Why We Use Bad Color Maps and What You Can Do About It.' <NAME>. "
                                      "In Proceedings of Human Vision and Electronic Imaging")
        self.save_pb.setWhatsThis("Saves the data from all open matrix windows to file and the project. This includes "
                                  "the option to save to csv - one for standard deviation and one for correlation "
                                  "values for each matrix in view")
        self.load_pb.setWhatsThis("Loads all matrix windows associated with this plugin that have been saved. Click "
                                  "'Manage Data' to find each window associated with this project. Individual windows "
                                  "can be deleted from there. ")
        self.cm_pb.setWhatsThis("Creates a single correlation matrix where each correlation matrix from selected "
                                "image stacks are averaged to create a single correlation matrix that has a standard "
                                "deviation displaying how correlation deviates across selected image stacks for each "
                                "ROI. Correlation coefficient used = Pearson")
class ConnectivityModel(QAbstractTableModel):
    """Table model holding a square ROI-by-ROI matrix of (mean, stdev) tuples.

    When *loaded_data* is given it is used directly; otherwise the matrix is
    computed from the widget's selected videos via calc_connectivity.
    """
    def __init__(self, widget, roinames, cm_type, loaded_data=None, progress_callback=None):
        super(ConnectivityModel, self).__init__()
        self.widget = widget
        self.cm_type = cm_type
        self.roinames = roinames
        project = widget.project
        # Reload exactly the requested ROIs so self.rois matches roinames order.
        rois = widget.view.vb.rois[:]
        for roi in rois:
            widget.view.vb.removeRoi(roi.name)
        widget.view.vb.currentROIindex = 0
        roipaths = [os.path.join(project.path, roiname + '.roi') for roiname in roinames]
        widget.view.vb.loadROI(roipaths)
        self.rois = [widget.view.vb.getRoi(roiname) for roiname in roinames]
        if loaded_data:
            self._data = loaded_data
        else:
            selected_videos = widget.selected_videos
            image = widget.view.vb.img
            self.matrix_list = []
            avg_data = []
            tot_data = []
            # (i, j) -> list of per-video correlation values, for stdev/SEM.
            dict_for_stdev = {}
            for key in [i for i in list(itertools.product(range(len(self.rois)), range(len(self.rois))))]:
                dict_for_stdev[key] = []
            for i, video_path in enumerate(selected_videos):
                if progress_callback:
                    progress_callback(i / len(selected_videos))
                self._data = calc_connectivity(video_path, image, self.rois)
                self.matrix_list = self.matrix_list + [self._data]
                # NOTE(review): on the first video tot_data, avg_data and
                # self._data all alias the SAME matrix object; the arithmetic
                # below depends on that aliasing plus the normpath guard, so
                # the statement order here must not be changed.
                if tot_data == []:
                    tot_data = self._data
                if avg_data == []:
                    avg_data = self._data
                # NOTE(review): this inner loop reuses the name `i`, shadowing
                # the video index; harmless only because the outer `enumerate`
                # rebinds `i` on the next iteration.
                for i in range(len(tot_data)):
                    for j in range(len(tot_data)):
                        dict_for_stdev[(i, j)] = dict_for_stdev[(i, j)] + [self._data[i][j]]
                        # ignore half of graph
                        if widget.mask_checkbox.isChecked() and i < j:
                            dict_for_stdev[(i, j)] = [0]
                        # Start above with self._data receiving= the first value before adding on the rest.
                        # don't add the first value twice
                        if os.path.normpath(video_path) != os.path.normpath(selected_videos[0]):
                            tot_data[i][j] = tot_data[i][j] + self._data[i][j]
            # Finally compute averages
            for i in range(len(tot_data)):
                for j in range(len(tot_data)):
                    if progress_callback:
                        progress_callback((i*j) / (len(tot_data)*len(tot_data)))
                    # ignore half of graph
                    if widget.mask_checkbox.isChecked() and i < j:
                        avg_data[i][j] = 0
                    else:
                        avg_data[i][j] = tot_data[i][j] / len(selected_videos)
            # SEM (standard error of the mean) or plain standard deviation,
            # per the widget's checkbox.
            if widget.sem_checkbox.isChecked():
                stdev_dict = {k: stats.sem(v) for k, v in dict_for_stdev.items()}
            else:
                stdev_dict = {k: np.std(v) for k, v in dict_for_stdev.items()}
            # The diagonal's spread must be zero (self-correlation is constant).
            assert(stdev_dict[(0, 0)] == 0 or math.isnan(stdev_dict[(0, 0)]))
            # combine stddev and avg data
            for i in range(len(avg_data)):
                for j in range(len(avg_data)):
                    if progress_callback:
                        progress_callback((i*j) / (len(avg_data) * len(avg_data)))
                    avg_data[i][j] = (avg_data[i][j], stdev_dict[(i, j)])
            self._data = avg_data
            assert(avg_data != [])

    def rowCount(self, parent):
        return len(self._data)

    def columnCount(self, parent):
        # The matrix is square, so row count doubles as column count.
        return len(self._data)

    def data(self, index, role):
        """Render each cell as 'mean +/- stdev' with a colormapped background."""
        tup = self._data[index.row()][index.column()]
        if role == Qt.DisplayRole:
            return str(round(tup[0], 2))+" +/- "+str(round(tup[1], 2))
        elif role == Qt.BackgroundRole:
            value = float(tup[0])
            min_label = self.widget.min_sb.value()
            max_label = self.widget.max_sb.value()
            # Map the mean through the user-selected colormap/range.
            gradient_range = matplotlib.colors.Normalize(min_label, max_label)
            cm_type = self.widget.cm_comboBox.currentText()
            cmap = matplotlib.cm.ScalarMappable(
                gradient_range, plt.get_cmap(cm_type))
            color = cmap.to_rgba(value, bytes=True)
            # color = plt.cm.jet(value)
            # color = [x * 255 for x in color]
            return QColor(*color)
        elif role == Qt.TextAlignmentRole:
            return Qt.AlignCenter
        return

    def headerData(self, section, orientation, role):
        # Both axes are labelled with the ROI names, in selection order.
        if role == Qt.DisplayRole:
            return self.rois[section].name
        return
class ConnectivityTable(QTableView):
    """Read-only table view tuned for displaying a square correlation matrix."""
    def __init__(self, parent=None):
        super(ConnectivityTable, self).__init__(parent)
        # Cells are display-only; clicking must not select anything.
        self.setSelectionMode(QAbstractItemView.NoSelection)
        self.horizontalHeader().setResizeMode(QHeaderView.Stretch)
        self.verticalHeader().setMaximumWidth(100)
        self.verticalHeader().setResizeMode(QHeaderView.Stretch)
        self.setMinimumSize(400, 300)
class ConnectivityDialog(QDialog):
    """Dialog wrapping a ConnectivityTable + ConnectivityModel pair."""
    def __init__(self, widget, roinames, cm_type, loaded_data=None, progress_callback=None):
        super(ConnectivityDialog, self).__init__()
        self.setWindowFlags(self.windowFlags() | Qt.WindowMinimizeButtonHint | Qt.WindowMaximizeButtonHint)
        # uuid suffix keeps window titles unique so saved files don't collide.
        self.setWindowTitle('Correlation Matrix - ' + str(uuid.uuid4()))
        self.table = ConnectivityTable()
        self.setup_ui()
        self.model = ConnectivityModel(widget, roinames, cm_type, loaded_data, progress_callback)
        self.table.setModel(self.model)
        self.selected_image_stacks = widget.selected_videos
        # view.setAspectLocked(True)
        # todo: add GradientLegend
        min_label = widget.min_sb.value()
        max_label = widget.max_sb.value()
        # NOTE(review): the legend is created but never shown/parented (see
        # the commented-out code below); `l` is currently unused.
        l = GradientLegend(min_label, max_label, cm_type)
        # l.show()
        # win = pg.GraphicsWindow()
        # win.setFixedSize(l.labelsize)
        # view = win.addViewBox()
        # view.addItem(l)
        # l.setParentItem(view)
        # win.show()
        # win.setParent(self)

    def setup_ui(self):
        """Place the matrix table as the dialog's sole content."""
        vbox = QVBoxLayout()
        vbox.addWidget(self.table)
        self.setLayout(vbox)
class RoiModel(QStandardItemModel):
    """Item model for the draggable ROI list.

    Items may be reordered by internal drag & drop (move-only), but nothing
    may be dropped *onto* an existing item — only between items or into the
    empty area below them.
    """

    def __init__(self, parent=None):
        super(RoiModel, self).__init__(parent)

    def supportedDropActions(self):
        # Move-only: a drag reorders items, it never copies them.
        return Qt.MoveAction

    def flags(self, index):
        if not index.isValid() or index.row() >= self.rowCount() or index.model() != self:
            return Qt.ItemIsDropEnabled  # we allow drops outside the items
        # Valid items keep their default flags minus drop-enabled, so a drag
        # can never land on top of an existing ROI entry.
        return super(RoiModel, self).flags(index) & (~Qt.ItemIsDropEnabled)

    # NOTE: the previous dropMimeData/removeRows/insertRows overrides were
    # pure pass-throughs to the superclass and have been removed; the
    # inherited QStandardItemModel implementations are used unchanged.
class MyPlugin(PluginDefault):
    """Plugin registration entry point for the Correlation Matrix widget."""
    def __init__(self, project, plugin_position):
        self.name = 'Correlation Matrix'
        self.widget = Widget(project, plugin_position)
        super().__init__(self.widget, self.widget.Labels, self.name)

    def run(self):
        # This plugin is UI-driven only; there is no batch/automation entry point.
        pass
| StarcoderdataPython |
119616 | <filename>Cartwheel/lib/Python26/Lib/site-packages/OpenGL/GL/ARB/vertex_shader.py<gh_stars>0
'''OpenGL extension ARB.vertex_shader
This module customises the behaviour of the
OpenGL.raw.GL.ARB.vertex_shader to provide a more
Python-friendly API
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.vertex_shader import *
### END AUTOGENERATED SECTION
from OpenGL.lazywrapper import lazy
from shader_objects import glGetObjectParameterivARB
# Keep a handle on the raw entry point before shadowing it with the
# Python-friendly wrapper below.  (This module is Python 2 code.)
base_glGetActiveAttribARB = glGetActiveAttribARB
def glGetActiveAttribARB(program, index):
    """Retrieve the name, size and type of the active *attribute* at *index*
    in *program* (the GL_OBJECT_ACTIVE_ATTRIBUTES queries below confirm this
    operates on attributes, despite the original docstring saying uniform)."""
    max_index = int(glGetObjectParameterivARB( program, GL_OBJECT_ACTIVE_ATTRIBUTES_ARB ))
    length = int(glGetObjectParameterivARB( program, GL_OBJECT_ACTIVE_ATTRIBUTE_MAX_LENGTH_ARB))
    if index < max_index and index >= 0 and length > 0:
        # Allocate output buffers for the C-style out-parameters.
        name = ctypes.create_string_buffer(length)
        size = arrays.GLintArray.zeros( (1,))
        gl_type = arrays.GLuintArray.zeros( (1,))
        base_glGetActiveAttribARB(program, index, length, None, size, gl_type, name)
        return name.value, size[0], gl_type[0]
    raise IndexError, 'index out of range from zero to %i' % (max_index - 1, )
# Expose the raw operation for introspection, matching PyOpenGL convention.
glGetActiveAttribARB.wrappedOperation = base_glGetActiveAttribARB
@lazy( glGetAttribLocationARB )
def glGetAttribLocationARB( baseOperation, program, name ):
    """Check that name is a string with a null byte at the end of it"""
    if not name:
        raise ValueError( """Non-null name required""" )
    elif name[-1] != '\000':
        # GL expects a NUL-terminated C string; append the terminator if missing.
        name = name + '\000'
    return baseOperation( program, name )
| StarcoderdataPython |
5129368 | from lgsf.councillors.scrapers import CMISCouncillorScraper
class Scraper(CMISCouncillorScraper):
    """Councillor scraper for Bolton Council's CMIS-based democracy site."""
    base_url = "http://democracy.bolton.gov.uk/cmis5/People.aspx"
| StarcoderdataPython |
3373381 | # problem67.py
# Project Euler problem 67: maximum total from top to bottom of the triangle.
r = []
# Context manager so the file handle is closed even if parsing fails.
with open("problem67.txt", "r") as file:
    for x in file:
        r.append([int(i) for i in x.split(" ")])
# Bottom-up dynamic programming: fold each row into the one above by adding
# the larger of its two children; the apex ends up holding the maximum total.
for i, j in [(i, j) for i in range(len(r) - 2, -1, -1) for j in range(i + 1)]:
    r[i][j] += max(r[i + 1][j], r[i + 1][j + 1])
print(r[0][0])
| StarcoderdataPython |
154803 | <reponame>samwel2000/portfolio-backend<filename>blog/urls.py<gh_stars>0
from django.urls import path
from .views import *
app_name = 'blog_api'

# URL routes for the portfolio/blog API; each maps to a class-based view
# imported from .views.
urlpatterns = [
    path('posts-category/', PostCategories.as_view(), name="posCategories"),
    path('posts/', PostList.as_view(), name="postlist"),
    path('posts/<str:filter>/', FilterPostList.as_view(), name="filter-postlist"),
    path('post/<slug:slug>/', PostDetail.as_view(), name="postdetail"),
    path('hero-content/', HeroContentList.as_view(), name="heroContentlist"),
    path('about-content/', AboutContentList.as_view(), name="aboutContentlist"),
    path('experience-content/', ExperienceContentList.as_view(),
         name="experienceContentlist"),
    path('experience-organization/', OrganizationList.as_view(),
         name="OrganizationListlist"),
    path('skills-list/', SkillList.as_view(), name="skillslist"),
    path('contact-content/', ContactContentList.as_view(),
         name="contactContentlist"),
    path('projects-list/', ProjectstList.as_view(), name="projectslist"),
    path('subscribe/', CreateSubscriber.as_view(), name="suscribe"),
    path('resume/', ResumeView.as_view(), name="resume"),
    # NOTE(review): this route reuses name="resume" (duplicate of the line
    # above) — likely a copy-paste slip; reverse('blog_api:resume') will only
    # ever resolve one of them. Left unchanged in case callers depend on it.
    path('comment/', CommentView.as_view(), name="resume"),
]
| StarcoderdataPython |
8016703 | r"""
封装处理NLP各个任务的数据
"""
import torch
from torch.utils.data import Dataset
class DatasetCLF(Dataset):
    """Text-classification dataset (binary / multi-class; not multi-label).

    Each sample is tokenized lazily on access; when labels are loaded the
    class label is attached under the ``labels`` key as a long tensor,
    matching the input convention of transformers classification models.
    """

    def __init__(self,
                 data,
                 tokenizer,
                 max_len=512,
                 load_label=True,
                 **kwargs):
        """
        :param data: samples shaped List[[text, label]],
            e.g. [['今天天气很好', 1], ['我心情不好', 0]]
        :param tokenizer: a transformers-style tokenizer callable
        :param max_len: truncation length for tokenization, default 512
        :param load_label: whether to attach the gold label, default True
        """
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.data = data
        self.load_label = load_label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sample = self.data[index]
        encoded = self.tokenizer(sample[0],
                                 truncation=True,
                                 max_length=self.max_len,
                                 return_tensors='pt')
        # Drop the batch dimension the tokenizer adds (shape [1, seq] -> [seq]).
        example = {key: tensor[0] for key, tensor in encoded.items()}
        if self.load_label:
            example['labels'] = torch.tensor(sample[1], dtype=torch.long)
        return example
| StarcoderdataPython |
11229907 | from google.appengine.api import urlfetch
import urllib
from helpers import authorization
import logging
#returns the response object
def request_twiml(Account, Url, Method, Payload):
    """Fetch TwiML from *Url*, attaching an X-Twilio-Signature header computed
    from the account credentials so the remote server's request validation
    passes.  (Google App Engine / Python 2 code — note iteritems below.)
    """
    if Method == 'GET':
        # NOTE(review): parameters are joined as raw k=v pairs with no URL
        # escaping, unlike the POST branch which uses urllib.urlencode —
        # values containing '&', '=', or spaces will produce a malformed
        # query string. Left as-is because the signature helper may hash the
        # same unescaped form; confirm before changing.
        url = Url + '?' +'&'.join(k+'='+v for k,v in Payload.iteritems())
        Twilio_Signature = authorization.create_twilio_authorization_hash(Account, Url, Payload, Method = 'GET')
        response = urlfetch.fetch(url = url,method = urlfetch.GET, headers = {'X-Twilio-Signature':Twilio_Signature} )
    else:
        Twilio_Signature = authorization.create_twilio_authorization_hash(Account, Url, Payload, Method='POST')
        response = urlfetch.fetch(url = Url,method = urlfetch.POST, payload = urllib.urlencode(Payload) , headers = {'X-Twilio-Signature':Twilio_Signature} )
    return response
335082 | <filename>awsm/keys/__init__.py
import os
from .exceptions import KeyNotFoundError
from .validators import KEY_SCHEMA
from awsm.storage.file_storage import USER_KEYS_CFG
from awsm.utils import load_from_yaml
from voluptuous.humanize import validate_with_humanized_errors
class KeyManager(object):
    """Loads key definitions from USER_KEYS_CFG and resolves names to paths."""
    def __init__(self):
        super(KeyManager, self).__init__()
        self._keys = None
        self._load_keys()

    def _load_keys(self):
        # Validate every entry against KEY_SCHEMA while loading from yaml.
        self._keys = {
            name: validate_with_humanized_errors(config, KEY_SCHEMA)
            for name, config in load_from_yaml(USER_KEYS_CFG).items()
        }

    def get(self, name):
        """Return (key name, key path) for the entry stored under *name*;
        raise KeyNotFoundError if no such entry exists."""
        key = self._keys.get(name)
        if key is None:
            raise KeyNotFoundError(name)
        return key['name'], key['path']

    def find_path(self, name):
        """Resolve *name* (either an entry key or an entry's 'name' field) to
        an absolute, existing key-file path; raise KeyNotFoundError otherwise."""
        path = None
        try:
            _, path = self.get(name)
        except KeyNotFoundError:
            # Fall back to matching the 'name' field of any configured key.
            for key_dict in self._keys.values():
                if name == key_dict['name']:
                    path = key_dict['path']
                    break
        finally:
            # NOTE(review): this also runs on the success path; a plain `if`
            # after the try/except would express the intent more clearly than
            # `finally` here.
            if path is None:
                raise KeyNotFoundError(name)
        path = os.path.abspath(os.path.expanduser(path))
        if not os.path.exists(path):
            raise KeyNotFoundError(
                name, 'Key file \'{}\' does not exist.'.format(path))
        return path
| StarcoderdataPython |
51166 | <gh_stars>0
import overhang
def test_Overhang():
    # Basic attribute checks on a homopolymer overhang.
    oh_aaa = overhang.Overhang("AAA")
    assert oh_aaa.overhang == "AAA"
    assert oh_aaa.overhang_rc == "TTT"
    assert oh_aaa.has_multimer is True  # tests count_max_repeat()
    assert oh_aaa.is_good() is True
    assert oh_aaa.gc_content == 0

    # TTAA reads the same on both strands, so it must be flagged palindromic.
    oh_ttaa = overhang.Overhang("TTAA")
    assert oh_ttaa.is_palindromic is True
    # Expected amino-acid regex patterns for each reading frame (order-insensitive
    # within each character class, hence the set comparison below).
    expected = [
        "L[KIRTMSN]",
        "[YFPILRTHSCDNAGV]*",
        "[IFVL][KN]",
        "L[KIRTMSN]",
        "[YFPILRTHSCDNAGV]*",
        "[IFVL][KN]",
    ]
    for index, pattern in enumerate(oh_ttaa.aa_patterns):
        assert set(pattern) == set(expected[index])

    # first example AAA had no start / stop codons
    oh_atga = overhang.Overhang("ATGA")
    assert oh_atga.has_start_codon is True
    assert oh_atga.has_stop_codon is True
    assert overhang.Overhang("ATGT").has_rc_start_codon is True
    assert overhang.Overhang("TGA").has_rc_stop_codon is True
def test_generate_all_overhangs():
    # 4^3 = 64 trimers, halved to 32 because reverse complements are collapsed.
    assert len(overhang.generate_all_overhangs(3)) == 32
| StarcoderdataPython |
3270159 | <gh_stars>0
#
# 28. Implement strStr()
#
# Implement strStr().
#
# Return the index of the first occurrence of needle in haystack, or -1 if
# needle is not part of haystack.
#
# Example 1:
# Input: haystack = "hello", needle = "ll"
# Output: 2
#
# Example 2:
# Input: haystack = "aaaaa", needle = "bba"
# Output: -1
#
# Clarification:
#
# What should we return when needle is an empty string? This is a great
# question to ask during an interview.
#
# For the purpose of this problem, we will return 0 when needle is an empty
# string. This is consistent to C's strstr() and Java's indexOf().
#
class Solution(object):
    def strStr(self, haystack, needle):
        '''Return the index of the first occurrence of needle in haystack.

        Returns 0 for an empty needle (consistent with C's strstr and Java's
        indexOf, as required by the problem statement) and -1 when needle is
        not a substring of haystack.

        :type haystack: str
        :type needle: str
        :rtype: int
        '''
        # str.find implements exactly this contract, including returning 0
        # for an empty needle and -1 on no match.
        return haystack.find(needle)
6669317 | import logging
from .... import app_settings
from . import elastic
logger = logging.getLogger('django_sso_app')
# Search endpoint for the configured events index.
ELASTICSEARCH_INDEX_URL = app_settings.EVENTS_HOST + '/{}/_search'.format(app_settings.EVENTS_INDEX)


def fetch_event_type_events(event_type: str, from_date: str, jwt: str = None, apikey: str = None) -> (object, int):
    """
    Returns event objects of the given type, oldest first.
    :param event_type: value matched against each document's ``doc.type``
    :param from_date: inclusive lower bound for ``doc.timestamp`` (ES ``gte``)
    :param jwt: optional JWT forwarded to the backend call
    :param apikey: optional API key forwarded to the backend call
    :return: (generator of event docs, total hit count)
    """
    # NOTE(review): page size is fixed at 1000 with no scrolling, so result
    # sets beyond 1000 hits are silently truncated.
    ES_QUERY = """{
        "from": 0,
        "size": 1000,
        "sort" : [
            { "doc.timestamp" : {"order" : "asc"}}
        ],
        "query": {
            "bool": {
                "filter": [
                    {
                        "range": {
                            "doc.timestamp": {
                                "gte": "{{ from_date }}"
                            }
                        }
                    },
                    {
                        "terms": {
                            "doc.type": [
                                "{{ event_type }}"
                            ]
                        }
                    }
                ]
            }
        }
    }"""

    def _fetch_data(event_type, from_date):
        # Render the query template and POST it to the search endpoint.
        es_url = ELASTICSEARCH_INDEX_URL
        es_query = elastic.get_es_query(ES_QUERY,
                                        event_type=event_type,
                                        from_date=from_date)
        # print('CALLING ES', es_query, es_url)
        return elastic.call_backend(es_url, es_query, jwt=jwt, apikey=apikey)

    def _cleaned_data(data):
        # Unwrap each ES hit down to the stored event document.
        for d in data:
            yield d['_source']['doc']

    es_response = _fetch_data(event_type, from_date)
    instance_tasks, count = es_response['hits']['hits'], es_response['hits']['total']['value']
    # return generator and total hits count
    return _cleaned_data(instance_tasks), count
def sort_event_type_events(events, event_type):
    """Return only the events of *event_type*, ordered by ascending timestamp."""
    matching = [event for event in events if event['type'] == event_type]
    matching.sort(key=lambda event: event['timestamp'])
    return matching
| StarcoderdataPython |
9626517 | <filename>Programas de exercicios/exercicio078.py<gh_stars>0
# Read five numbers, report the largest/smallest values and every position
# where they occur.
m = []
for c in range(0, 5):
    m.append(input(f'digite o numero na posição {c}: '))
print(f'Sua lista foi {m}')

# The list holds the raw strings the user typed; compare numerically.
valores = [int(v) for v in m]
maior = max(valores)
menor = min(valores)

# Bugfix: the old "corretor"/"daliprafrente" index bookkeeping mishandled
# repeated values (e.g. it set `corretor = +1` when locating the minimum)
# and only supported up to five duplicates. enumerate() finds every
# occurrence directly.
n1 = [i for i, v in enumerate(valores) if v == maior]
n2 = [i for i, v in enumerate(valores) if v == menor]

print(f'O numero maior foi {maior} e estão nas posições: {n1}')
# NOTE: comparing against str(maior) assumes the user typed numbers without
# leading zeros (same assumption as the original).
for i, v in enumerate(m):
    if v == str(maior):
        print(i, end='')
print()
print(f'O numero menor foi {menor} e estão nas posições: {n2}')
for i, v in enumerate(m):
    if v == str(menor):
        print(i, end='')
print()
372845 | <filename>pyxl/scripts/parse_file.py<gh_stars>100-1000
#!/usr/bin/env python
import sys
from pyxl.codec.transform import pyxl_invert_string, pyxl_transform_string
if __name__ == '__main__':
    # Usage: parse_file.py [-i | -r] FILE
    #   -i  emit an invertible transform; -r  reverse a previous transform.
    invert = False
    invertible = False
    flag = sys.argv[1]
    if flag == '-i':
        invertible = True
        fname = sys.argv[2]
    elif flag == '-r':
        invert = True
        fname = sys.argv[2]
    else:
        fname = flag

    with open(fname, 'r') as source:
        contents = source.read()

    if invert:
        output = pyxl_invert_string(contents)
    else:
        output = pyxl_transform_string(contents, invertible)
    print(output, end='')
| StarcoderdataPython |
10473 | <reponame>vault-the/changes
from changes.api.base import APIView
from changes.lib.coverage import get_coverage_by_build_id, merged_coverage_data
from changes.models.build import Build
class BuildTestCoverageAPIView(APIView):
    """Expose the merged test-coverage data for a single build."""

    def get(self, build_id):
        target = Build.query.get(build_id)
        if target is None:
            return '', 404
        merged = merged_coverage_data(get_coverage_by_build_id(target.id))
        return self.respond(merged)
| StarcoderdataPython |
3598164 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: spirrobe -> github.com/spirrobe/
"""
def ConcPerCCM(bins,
               windspeed=[1],  # in m/s
               samplearea=0.298,  # as area in square millimeters
               samplefrequency=10,  # as Hz
               combined=True):
    """Convert per-bin particle counts to a number concentration per cm^3.

    :param bins: 2-D array-like (time, bin) of particle counts per interval.
    :param windspeed: wind speed in m/s per interval (defaults to 1 m/s; the
        default list is never mutated -- it is copied into a new array).
    :param samplearea: probe sample area in mm^2 (0.298 per CDP specs).
    :param samplefrequency: sampling frequency in Hz.
    :param combined: if True return one concentration per time step (summed
        over bins), otherwise a per-bin array with the same shape as *bins*.
    :returns: numpy array of concentrations in particles/cm^3; NaNs from a
        zero sample volume are replaced with 0 via np.nan_to_num.
    """
    import numpy as np
    # windspeed defaults to 1 meter per seconds windspeed
    # samplearea defaults to 0.298 as given by CDP specs
    if not type(windspeed) == np.ndarray:
        # Bugfix: np.float was deprecated and removed in NumPy >= 1.24;
        # the builtin float is the documented replacement.
        windspeed = np.asarray(windspeed, dtype=float)
    else:
        windspeed = windspeed.copy()
    # convert windspeed first to cm per s and then to cm adjusted to
    # sampling frequency, as we look for cm and we still have cm/s
    windspeed = windspeed * (100 / samplefrequency)
    if np.nanmin(windspeed) < 0.0:
        print('*' * 30 + '\nWarning: Wind speed contains negative values.\n' +
              ' Concentration and derived parameters may be ' +
              'negative therefore\n' +
              'Use np.abs(x) on result to ammend this if needed\n' + '*' * 30)
    elif np.nanmax(windspeed) > 1000:
        print('*' * 30,
              'Warning: Wind speed max seems high with >1000 cm ',
              'per samplinginterval', samplefrequency,
              '\n Make sure all parameters are proper',
              '*' * 30)
    samplearea /= 10**2  # make cm instead of millimeters out of it
    volume = windspeed * samplearea  # sample volume in cm^3 per interval
    # set volume to nan if its zero because then its not physically correct
    volume[volume == 0] = np.nan
    if combined:
        concperccm = np.nansum(bins, axis=1) / volume
    else:
        concperccm = bins / np.repeat(volume[:, np.newaxis],
                                      bins.shape[1], axis=1)
    concperccm = np.nan_to_num(concperccm)
    return concperccm
| StarcoderdataPython |
3561098 | import django.db.models.deletion
from django.db import migrations, models
from djangocms_blog.models import thumbnail_model
class Migration(migrations.Migration):
    """Re-declare plugin parent links and main-image FKs with explicit on_delete."""

    dependencies = [
        ("djangocms_blog", "0020_thumbnail_move4"),
    ]

    # The three plugin models share one AlterField shape, as do the two
    # main-image fields on Post; build the identical operations in loops.
    operations = [
        migrations.AlterField(
            model_name=plugin,
            name="cmsplugin_ptr",
            field=models.OneToOneField(
                parent_link=True,
                related_name="djangocms_blog_{}".format(plugin),
                auto_created=True,
                primary_key=True,
                serialize=False,
                to="cms.CMSPlugin",
                on_delete=models.deletion.CASCADE,
            ),
        )
        for plugin in (
            "authorentriesplugin",
            "genericblogplugin",
            "latestpostsplugin",
        )
    ] + [
        migrations.AlterField(
            model_name="post",
            name="main_image_{}".format(size),
            field=models.ForeignKey(
                related_name="djangocms_blog_post_{}".format(size),
                on_delete=django.db.models.deletion.SET_NULL,
                verbose_name="main image {}".format(size),
                blank=True,
                to=thumbnail_model,
                null=True,
            ),
        )
        for size in ("full", "thumbnail")
    ]
| StarcoderdataPython |
6421165 | <reponame>MinisterioPublicoRJ/api-cadg
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from rest_framework.generics import (
GenericAPIView,
RetrieveAPIView,
ListAPIView
)
from rest_framework.response import Response
from .cache import (
get_cache,
save_cache,
ENTITY_KEY_PREFIX,
ENTITY_KEY_CHECK,
DATA_ENTITY_KEY_PREFIX,
DATA_ENTITY_KEY_CHECK,
DATA_DETAIL_KEY_PREFIX,
DATA_DETAIL_KEY_CHECK
)
from .db_connectors import execute_geospatial
from .jwt_manager import get_permissions
from .models import Entidade, DadoDetalhe, DadoEntidade
from .osmapi import query as osmquery
from .serializers import (
EntidadeSerializer,
DadoDetalheSerializer,
DadoEntidadeSerializer,
EntidadeIdSerializer
)
class EntidadeView(GenericAPIView):
    """Serve one Entidade, selected by its abbreviation, honouring JWT permissions."""
    serializer_class = EntidadeSerializer

    def get(self, request, *args, **kwargs):
        permissions = get_permissions(request)
        entity = get_object_or_404(
            Entidade.objects.get_authorized(permissions),
            abreviation=self.kwargs['entity_type']
        )
        # The cache is consulted only *after* the permission check above so a
        # cached response can never bypass authorization.
        cached = get_cache(ENTITY_KEY_PREFIX, kwargs)
        if cached:
            return cached

        serialized = EntidadeSerializer(entity, domain_id=self.kwargs['domain_id']).data
        if not serialized['exibition_field']:
            raise Http404

        if entity.is_cacheable:
            save_cache(serialized, ENTITY_KEY_PREFIX, ENTITY_KEY_CHECK, kwargs)
        return Response(serialized)
class DadoEntidadeView(RetrieveAPIView):
    """Serve one top-level data box (DadoEntidade) of an entity."""
    serializer_class = DadoEntidadeSerializer

    def get(self, request, *args, **kwargs):
        permissions = get_permissions(request)
        dado = get_object_or_404(
            DadoEntidade.objects.get_authorized(permissions),
            entity_type__abreviation=self.kwargs['entity_type'],
            pk=self.kwargs['pk']
        )
        # Cache lookup happens only after authorization so it cannot be bypassed.
        cached = get_cache(DATA_ENTITY_KEY_PREFIX, kwargs)
        if cached:
            return cached

        serialized = DadoEntidadeSerializer(
            dado,
            domain_id=self.kwargs['domain_id']
        ).data
        if not serialized['external_data']:
            raise Http404

        if dado.is_cacheable:
            save_cache(
                serialized,
                DATA_ENTITY_KEY_PREFIX,
                DATA_ENTITY_KEY_CHECK,
                kwargs
            )
        return Response(serialized)
class DadoDetalheView(RetrieveAPIView):
    """Serve one detail row (DadoDetalhe) belonging to a top-level data box."""
    serializer_class = DadoDetalheSerializer

    def get(self, request, *args, **kwargs):
        permissions = get_permissions(request)
        detalhe = get_object_or_404(
            DadoDetalhe.objects.get_authorized(permissions),
            dado_main__entity_type__abreviation=self.kwargs['entity_type'],
            pk=self.kwargs['pk']
        )
        # Cache lookup happens only after authorization so it cannot be bypassed.
        cached = get_cache(DATA_DETAIL_KEY_PREFIX, kwargs)
        if cached:
            return cached

        serialized = DadoDetalheSerializer(
            detalhe,
            domain_id=self.kwargs['domain_id']
        ).data
        if not serialized['external_data']:
            raise Http404

        if detalhe.is_cacheable:
            save_cache(
                serialized,
                DATA_DETAIL_KEY_PREFIX,
                DATA_DETAIL_KEY_CHECK,
                kwargs
            )
        return Response(serialized)
@method_decorator(cache_page(600, key_prefix='lupa_osm'), name='dispatch')
class OsmQueryView(ListAPIView):
    """Proxy free-text place searches to OpenStreetMap (responses cached 10 min)."""
    queryset = []

    def get(self, request, *args, **kwargs):
        terms = self.kwargs['terms']
        return Response(osmquery(terms))
@method_decorator(cache_page(600, key_prefix='lupa_geospat'), name='dispatch')
class GeoSpatialQueryView(ListAPIView):
    """Resolve a lat/lon pair (plus an OSM value) to the matching entity id."""
    serializer_class = EntidadeIdSerializer
    queryset = []

    def get(self, request, lat, lon, value):
        # Prefer the entity type explicitly attached to this OSM value and
        # fall back to the default OSM level when none matches.
        entity_type = Entidade.objects.filter(osm_value_attached=value).first()
        if not entity_type:
            entity_type = Entidade.objects.filter(osm_default_level=True).first()
        if not entity_type:
            raise Http404

        rows = execute_geospatial(
            entity_type.database,
            entity_type.schema,
            entity_type.table,
            entity_type.geojson_column,
            entity_type.id_column,
            [lat, lon]
        )
        if not rows:
            raise Http404

        entity_id = rows[0][0]
        serializer = EntidadeIdSerializer(entity_type, entity_id=entity_id)
        return Response(serializer.data)
| StarcoderdataPython |
85252 | import warnings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from gears.asset_handler import BaseAssetHandler
from gears.finders import BaseFinder
_cache = {}
def _get_module(path):
    """Import and return the module at dotted *path*, wrapping import failures."""
    try:
        return import_module(path)
    except ImportError as exc:
        raise ImproperlyConfigured('Error importing module %s: "%s".' % (path, exc))
def _get_module_attr(module_path, name):
    """Return attribute *name* of the module at *module_path*."""
    module = _get_module(module_path)
    try:
        return getattr(module, name)
    except AttributeError:
        raise ImproperlyConfigured('Module "%s" does not define a "%s" obj.' % (module_path, name))
def _get_object(path):
    """Resolve dotted *path* ("pkg.module.attr") to an object, memoized in _cache."""
    try:
        return _cache[path]
    except KeyError:
        obj = _get_module_attr(*path.rsplit('.', 1))
        _cache[path] = obj
        return obj
def get_cache(path, options=None):
    """Instantiate the cache class at dotted *path* with *options* as kwargs."""
    cache_cls = _get_object(path)
    kwargs = options or {}
    return cache_cls(**kwargs)
def get_finder(path, options=None):
    """Instantiate the BaseFinder subclass at dotted *path*."""
    finder_cls = _get_object(path)
    if not issubclass(finder_cls, BaseFinder):
        raise ImproperlyConfigured('"%s" is not a subclass of BaseFinder.' % path)
    kwargs = options or {}
    return finder_cls(**kwargs)
def get_asset_handler(path, options=None):
    """Resolve dotted *path* to an asset handler.

    A BaseAssetHandler subclass is turned into a handler via as_handler();
    any other callable is returned as-is (its options, if any, are ignored
    with a warning).
    """
    handler = _get_object(path)
    # The TypeError guard covers both "handler is not a class" and a failing
    # as_handler(**options) call, falling through to the callable check --
    # this mirrors the original best-effort behaviour.
    try:
        if issubclass(handler, BaseAssetHandler):
            return handler.as_handler(**(options or {}))
    except TypeError:
        pass
    if callable(handler):
        if options is not None:
            warnings.warn('%r is provided as %r handler options, but not used '
                          'because this handler is not a BaseAssethandler subclass.'
                          % (options, path))
        return handler
    raise ImproperlyConfigured('"%s" must be a BaseAssetHandler subclass or callable object' % path)
| StarcoderdataPython |
4856947 | <reponame>sourav-majumder/qtlab<filename>instrument_plugins/ThorlabsFTD2XX.py<gh_stars>0
#
# Copyright (C) 2011 <NAME>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import _ftd2xx as f
from _ftd2xx.defines import *
from time import sleep
from instrument import Instrument
import types
import logging
class ThorlabsFTD2XX(Instrument):
def __init__(self,name, HWSerialNumber='83828433', StageType='PRM1MZ8'):
logging.info(__name__ + ' : Initializing instrument Thorlabs driver')
Instrument.__init__(self, name, tags=['physical'])
# TODO: Fix when device is already initialized and driver is reloaded!!
# Obtain handle from driver itself
try:
L = f.listDevices()
except f.DeviceError:
print "No active devices!!"
L = ['None']
print L
if '83828433' in L:
self.g = f.openEx('83828433')
else:
# Alternative load
f.createDeviceInfoList()
t = f.getDeviceInfoDetail()
if '83828433' == t['serial']:
h = t['handle']
H = h.value
self.g = f.FTD2XX(H)
self.g.setBaudRate(115200)
self.g.setDataCharacteristics(f.defines.BITS_8, f.defines.STOP_BITS_1, f.defines.PARITY_NONE)
sleep(0.2)
self.g.purge(f.defines.PURGE_RX | f.defines.PURGE_TX)
sleep(0.2)
self.g.resetDevice()
self.g.setFlowControl(f.defines.FLOW_RTS_CTS,0,0)
self.g.setRts()
# Add functions
self.add_function('Identify')
self.add_function ('GoHome')
self.add_function ('Close')
self.add_function ('StopMoving')
self.add_function ('EnableChannel1')
self.add_function ('DisableChannel1')
self.add_function ('MoveJogPos')
self.add_function ('MoveJogNeg')
self.add_function ('MoveRelative')
self.add_function ('ReturnStatus')
# Add parameters
self.add_parameter('Position',
flags=Instrument.FLAG_GETSET, units='deg', minval=-720, maxval=720, type=types.FloatType)
self.add_parameter('IsMoving',
flags=Instrument.FLAG_GET, type=types.BooleanType)
self.status = {}
self.get_Position()
# def __del__(self):
# print "Bye!!"
# self.g.close()
# Fixme: release handle
def Identify(self):
self.g.write("\x23\x02\x00\x00\x50\x01")
def GoHome(self):
self.g.write("\x43\x04\x01\x00\x50\x01")
def Close(self):
self.g.close()
def ReadBuffer(self):
n = self.g.getQueueStatus()
return self.g.read(n)
def StatusbytesToPosition(self, blist):
## Add stuff to analyse the statusbits and return a dict
status = {}
pos = ord(blist[8]) + 256*ord(blist[9]) + 256*256*ord(blist[10]) + 256*256*256*ord(blist[11])
status1 = ord(blist[16])
status2 = ord(blist[17])
status3 = ord(blist[18])
status4 = ord(blist[19])
status['pos']=pos
if status1%2>0:
status['CW_HW_lim'] = True
pass
else:
status['CW_HW_lim'] = False
pass
if status1%4>1:
status['CCW_HW_lim'] = True
pass
else:
status['CCW_HW_lim'] = False
pass
if status1%8>2:
status['CW_SW_lim'] = True
pass
else:
status['CW_SW_lim'] = False
pass
if status1%16>4:
status['CCW_SW_lim'] = True
pass
else:
status['CCW_SW_lim'] = False
pass
if status1%32>8:
status['Moving_CW'] = True
pass
else:
status['Moving_CW'] = False
pass
if status1%64>16:
status['Moving_CCW'] = True
pass
else:
status['Moving_CCW'] = False
pass
if status1%128>32:
status['Jogging_CW'] = True
pass
else:
status['Jogging_CW'] = False
pass
if status1%256>64:
status['Jogging_CCW'] = True
pass
else:
status['Jogging_CCW'] = False
pass
if status2%2>0:
status['Connected'] = True
pass
else:
status['Connected'] = False
pass
if status2%4>1:
status['Homing'] = True
pass
else:
status['Homing'] = False
pass
if status2%8>2:
status['Homed'] = True
pass
else:
status['Homed'] = False
pass
if status2%16>4:
status['Misc'] = True
pass
else:
status['Misc'] = False
pass
if status2%32>8:
status['Interlock'] = True
pass
else:
status['Interlock'] = False
pass
self.status = status
return status
def ReturnStatus(self):
return self.status
def do_get_IsMoving(self):
self.ReadBuffer()
self.g.write('\x90\x04\x01\x00\x50\x01')
sleep(0.1)
while(self.g.getQueueStatus()==0): # This is dangerous!!
sleep(0.5)
stat = (self.StatusbytesToPosition(self.ReadBuffer()))
return (stat['Moving_CW'] or stat['Moving_CCW'])
def do_get_Position(self):
self.ReadBuffer()
self.g.write('\x90\x04\x01\x00\x50\x01')
sleep(0.1)
while(self.g.getQueueStatus()==0): # This is dangerous!!
sleep(0.5)
valold = (self.StatusbytesToPosition(self.ReadBuffer()))['pos']
if valold >= 2147483648:
val = (valold-4294967296)/1920.0
else:
val = valold/1920.0
return val
def do_set_Position(self,pos):
num = int(pos*1920)
byte1 = num%256
byte2 = int(num/256)%256
byte3 = int(num/256/256)%256
byte4 = int(num/256/256/256)%256
str = '\x53\x04\x06\x00\x80\x01\x01\x00' + chr(byte1) + chr(byte2) + chr(byte3) + chr(byte4)
self.g.write(str)
def MoveRelative(self,move):
num = int(move*1920)
byte1 = num%256
byte2 = int(num/256)%256
byte3 = int(num/256/256)%256
byte4 = int(num/256/256/256)%256
str = '\x48\x04\x06\x00\x80\x01\x01\x00' + chr(byte1) + chr(byte2) + chr(byte3) + chr(byte4)
self.g.write(str )
def StopMoving(self):
self.g.write('\x65\x04\x01\x02\x50\x01')
def EnableChannel1(self):
self.g.write('\x10\x02\x01\x01\x50\x01')
def DisableChannel1(self):
self.g.write('\x10\x02\x01\x01\x50\x01')
def MoveJogPos(self):
self.g.write('\x6A\x04\x01\x02\x50\x01')
def MoveJogNeg(self):
self.g.write('\x6A\x04\x01\x02\x50\x01')
| StarcoderdataPython |
3440611 | <reponame>vkareh/managed-tenants-cli<filename>managedtenants/bundles/addon_bundles.py
import logging
from functools import lru_cache
from pathlib import Path
import jsonschema
import semver
import yaml
from sretoolbox.utils.logger import get_text_logger
from managedtenants.bundles.bundle import Bundle
from managedtenants.bundles.exceptions import AddonBundlesError
from managedtenants.bundles.imageset import ImageSet
from managedtenants.bundles.utils import get_subdirs
from managedtenants.data.paths import SCHEMAS_DIR
from managedtenants.utils.git import get_short_hash
from managedtenants.utils.schema import load_schema
class AddonBundles:
    """
    Parses an addon_bundles_dir into a list of bundles.
    Example directory structure:
        gpu-operator
        ├── main
        │   ├── 1.9.0
        └── nfd-operator
            └── 4.8.0
    Lexicography:
        - "gpu-operator" is the main operator
        - "nfd-operator" is a dependency operator
        - "1.9.0/" is the bundle associated with the "gpu-operator"
        - "4.8.0/" is the bundle associated with the "nfd-operator"
    Notes:
        - each bundle directory must have a valid semver name
        - there can be an unlimited number of dependency operators
        - if the `--single-bundle-per-operator` flag is provided, there can only
          be a single-bundle-per-operator
    """

    def __init__(self, root_dir, debug=False, single_bundle=False):
        self.log = get_text_logger(
            "managedtenants-addon-bundles",
            level=logging.DEBUG if debug else logging.INFO,
        )
        self.single_bundle = single_bundle
        self.root_dir = Path(root_dir)
        self.addon_name = self.root_dir.name
        self.main_bundle = self._parse_main_bundle()
        self.dependency_bundles = self._parse_dependency_bundles()
        self.config = self._parse_and_validate_config()
        # Lazily-computed memo for _get_latest_version(). A per-instance
        # attribute replaces the previous @lru_cache on the method, which
        # keyed on `self` and kept every instance alive for the cache's
        # lifetime (flake8-bugbear B019).
        self._latest_version = None

    def _parse_main_bundle(self):
        """Parse the mandatory main/ operator directory into its bundles."""
        operator_dir = self.root_dir / "main"
        if not operator_dir.is_dir():
            raise AddonBundlesError(
                f"invalid structure for {self.root_dir}: {operator_dir} does"
                " not exist"
            )
        return self._parse_operator_bundle(operator_dir)

    def _parse_dependency_bundles(self):
        """Parse every non-main operator directory into dependency bundles."""
        res = []
        for operator_dir in get_subdirs(self.root_dir):
            if operator_dir.name != "main":
                res.extend(self._parse_operator_bundle(operator_dir))
        return res

    def _parse_operator_bundle(self, operator_dir):
        """
        Parse the bundle associated with the operator_dir. We enforce the
        single-bundle-per-operator pattern when self.single_bundle is set.
        """
        res = []
        for path in get_subdirs(operator_dir):
            bundle = Bundle(
                addon_name=self.addon_name,
                path=path.resolve(),
                operator_name=self._get_bundle_operator_name(operator_dir),
                version=path.name,
                single_bundle=self.single_bundle,
            )
            res.append(bundle)

        if self.single_bundle:
            if len(res) != 1:
                raise AddonBundlesError(
                    f"invalid structure for {self.root_dir}: expected"
                    f" {operator_dir} to contain exactly 1 bundle, but found"
                    f" {len(res)} bundles (single-bundle-per-operator pattern)."
                )
        else:
            if len(res) == 0:
                raise AddonBundlesError(
                    f"invalid structure for {self.root_dir}:"
                    f" {operator_dir} contains zero bundles."
                )
        return res

    def _get_bundle_operator_name(self, operator_dir):
        """The main operator is named after the addon; others keep their dir name."""
        operator_name = operator_dir.name
        return self.addon_name if operator_name == "main" else operator_name

    def _parse_and_validate_config(self):
        """Load main/config.yaml and validate it against the mtbundles schema."""
        config_file = self.root_dir / "main" / "config.yaml"
        try:
            with open(config_file, "r", encoding="utf-8") as f:
                config = yaml.safe_load(f)
            self._validate_config(config)
            return config
        except FileNotFoundError:
            err_msg = f"missing {config_file} for {self}."
            self.log.error(err_msg)
            raise AddonBundlesError(err_msg)
        except yaml.YAMLError as e:
            raise AddonBundlesError(
                f"failed to parse {config_file} for {self}: {e}."
            )

    def _validate_config(self, config):
        """Raise AddonBundlesError unless *config* conforms to mtbundles.schema.yaml."""
        try:
            jsonschema.validate(
                instance=config,
                schema=load_schema("mtbundles"),
                # required to resolve $ref: *.json
                resolver=jsonschema.RefResolver(
                    base_uri=f"file://{SCHEMAS_DIR}/",
                    referrer="mtbundles.schema.yaml",
                ),
            )
        except jsonschema.exceptions.SchemaError as e:
            raise AddonBundlesError(f"mtbundles schema error: {e}")
        except jsonschema.exceptions.ValidationError as e:
            raise AddonBundlesError(f"schema validation error for {self}: {e}")

    def _get_latest_version(self):
        """
        Returns the latest semver version amongst the main bundles (memoized
        per instance).
        """
        if self._latest_version is None:
            all_versions = [bundle.version for bundle in self.main_bundle]
            self._latest_version = max(
                all_versions, key=semver.VersionInfo.parse
            )
        return self._latest_version

    def get_all_bundles(self):
        """
        Returns an addon's main_bundles and dependency_bundles.
        """
        return self.main_bundle + self.dependency_bundles

    def get_all_imagesets(self, index_image):
        """
        Produce all imagesets for a given index_image.

        :param index_image: representation of an index_image
        """
        res = []
        version = self._get_latest_version()
        ocm_config = self._get_ocm_config()
        for addon in self.config["addons"]:
            for env in addon["environments"]:
                imageset = ImageSet(
                    addon_name=addon["name"],
                    env=env,
                    version=version,
                    index_image=index_image,
                    ocm_config=ocm_config,
                )
                res.append(imageset)
        return res

    def _get_ocm_config(self):
        """
        OCM config is optional in the schema. Defaults have to conform with the
        schema (imageset.schema.yaml).
        """
        ocm_config = self.config.get("ocm", {})
        defaults = {
            "addOnParameters": [],
            "addOnRequirements": [],
            "subOperators": [],
            "subscriptionConfig": {"env": []},
        }
        return {k: ocm_config.get(k, d) for k, d in defaults.items()}

    def get_all_metadata_paths(self):
        """
        Returns all metadata paths in managed-tenants related to this
        AddonBundles.
        """
        res = []
        for addon in self.config["addons"]:
            for env in addon["environments"]:
                res.append(f"addons/{addon['name']}/metadata/{env}/addon.yaml")
        return res

    def get_unique_name(self):
        """
        Provide a unique name to identify an AddonBundles. Used for both the
        merge request title and branch name.
        """
        return (
            f"{self.addon_name}-{self._get_latest_version()}-{get_short_hash()}"
        )

    def __str__(self):
        return (
            f"AddonBundles(root_dir={self.root_dir},"
            f" addon_name={self.addon_name},"
            f" latest_version={self._get_latest_version()})"
        )
| StarcoderdataPython |
279372 | <reponame>pablogo1/statistics
from problem1 import Problem1
from problem2 import Problem2
if __name__ == "__main__":
Problem1().run(19.5, 20, 22)
Problem2().run(80, 60)
# result_p2 = Problem2().run(20, 22)
# print(f"{result_p1:.3f}")
# print(f"{result_p2:.3f}")
| StarcoderdataPython |
68236 | <reponame>kb2ma/openvisualizer
# Copyright (c) 2010-2013, Regents of the University of California.
# All rights reserved.
#
# Released under the BSD 3-Clause license as published at the link below.
# https://openwsn.atlassian.net/wiki/display/OW/License
import logging
from openvisualizer.utils import buf2int, hex2buf
# Module logger; a NullHandler keeps it silent until the application
# configures logging.
log = logging.getLogger('SixLowPanFrag')
log.setLevel(logging.INFO)
log.addHandler(logging.NullHandler())
# ============================ parameters ======================================
class ReassembleEntry(object):
    """Bookkeeping for one in-progress 6LoWPAN datagram reassembly."""

    def __init__(self, wanted, received, frag):
        # Total datagram size announced in the FRAG1/FRAGN header.
        self.total_bytes = wanted
        # Number of payload bytes received so far.
        self.recvd_bytes = received
        # List of (offset, payload) tuples collected so far.
        self.fragments = frag
class Fragmentor(object):
    """
    Class which performs fragmentation and reassembly of 6LoWPAN packets for
    transport over IEEE 802.15.4 networks.

    This class implements the following RFCs;

    * *https://tools.ietf.org/html/rfc4944*
      Transmission of IPv6 Packets over IEEE 802.15.4 Networks.
    """

    FRAG1_DISPATCH = 0xC0
    FRAGN_DISPATCH = 0xE0
    FRAG_DISPATCH_MASK = 0xF8
    FRAG_SIZE_MASK = 0x7FF

    # If L2 security is not active in the network we can use up to 96 bytes of
    # payload per fragment. Since openvisualizer is not aware of the security
    # configuration of the network, we use by default a smaller fragment
    # payload size.
    MAX_FRAGMENT_SIZE = 80

    FRAG1_HDR_SIZE = 4
    FRAGN_HDR_SIZE = 5

    def __init__(self, tag=1):
        # Partially-reassembled datagrams, keyed by datagram tag.
        self.reassemble_buffer = dict()
        # Tag stamped on the next outgoing fragmented datagram.
        self.datagram_tag = tag

    def do_reassemble(self, lowpan_pkt):
        """Feed one incoming frame (list of byte values).

        Non-fragment frames are returned unchanged. Fragments are buffered;
        once a datagram is complete its reassembled payload is returned,
        otherwise None.
        """
        reassembled_pkt = None

        # parse fragmentation header
        dispatch = lowpan_pkt[0] & self.FRAG_DISPATCH_MASK
        datagram_size = buf2int(lowpan_pkt[:2]) & self.FRAG_SIZE_MASK

        if dispatch not in [self.FRAG1_DISPATCH, self.FRAGN_DISPATCH]:
            # Not a fragment: pass through untouched.
            return lowpan_pkt

        # extract fragmentation tag
        datagram_tag = buf2int(lowpan_pkt[2:4])

        if dispatch == self.FRAG1_DISPATCH:
            payload = lowpan_pkt[4:]
            offset = 0
        else:
            payload = lowpan_pkt[5:]
            offset = lowpan_pkt[4]

        if datagram_tag in self.reassemble_buffer:
            entry = self.reassemble_buffer[datagram_tag]
            entry.recvd_bytes += len(payload)
            entry.fragments.append((offset, payload))
        else:
            new_entry = ReassembleEntry(datagram_size, len(payload), [(offset, payload)])
            self.reassemble_buffer[datagram_tag] = new_entry

        # check if we can reassemble
        num_of_frags = 0
        used_tag = 0
        # Bugfix: iterate over a snapshot -- deleting from the dict while
        # iterating its items() view raises RuntimeError on Python 3.
        for tag, entry in list(self.reassemble_buffer.items()):
            if entry.total_bytes == entry.recvd_bytes:
                frags = sorted(entry.fragments, key=lambda frag: frag[0])
                used_tag = tag
                num_of_frags = len(frags)
                reassembled_pkt = []
                for frag in frags:
                    reassembled_pkt.extend(frag[1])
                del self.reassemble_buffer[tag]

        if reassembled_pkt is not None:
            # NOTE(review): "success" is not a stdlib logging method; this
            # presumably relies on a verboselogs-style logger being installed
            # globally -- confirm, or fall back to log.info.
            log.success("[GATEWAY] Reassembled {} frags with tag {} into an IPv6 packet of size {}".format(
                num_of_frags, used_tag, len(reassembled_pkt)))
        return reassembled_pkt

    def do_fragment(self, ip6_pkt):
        """Split *ip6_pkt* into RFC 4944 FRAG1/FRAGN frames.

        Packets that fit in a single frame are returned as-is in a
        one-element list.
        """
        fragment_list = []
        original_length = len(ip6_pkt)

        if len(ip6_pkt) <= self.MAX_FRAGMENT_SIZE + self.FRAGN_HDR_SIZE:
            return [ip6_pkt]

        while len(ip6_pkt) > 0:
            frag_header = []
            fragment = []

            datagram_tag = hex2buf("{:04x}".format(self.datagram_tag))

            frag_len = min(len(ip6_pkt), self.MAX_FRAGMENT_SIZE)

            # Clarity fix: the dispatch|size value is 16 bits wide, so format
            # it with 4 hex digits (":02x" produced the same output only
            # because the value always exceeds 0xFF).
            if len(fragment_list) == 0:
                # first fragment: dispatch + datagram size, then tag
                dispatch_size = hex2buf("{:04x}".format((self.FRAG1_DISPATCH << 8) | original_length))
                frag_header.extend(dispatch_size)
                frag_header.extend(datagram_tag)
            else:
                # subsequent fragment: dispatch + size, tag, then offset
                dispatch_size = hex2buf("{:04x}".format((self.FRAGN_DISPATCH << 8) | original_length))
                # Bugfix: use floor division -- "/" yields a float offset on
                # Python 3 which corrupts the header byte.
                offset = [len(fragment_list) * (self.MAX_FRAGMENT_SIZE // 8)]
                frag_header.extend(dispatch_size)
                frag_header.extend(datagram_tag)
                frag_header.extend(offset)

            fragment.extend(frag_header)
            fragment.extend(ip6_pkt[:frag_len])

            fragment_list.append(fragment)
            ip6_pkt = ip6_pkt[frag_len:]

        # increment the tag for the new set of fragments
        self.datagram_tag += 1

        log.info("[GATEWAY] Fragmenting incoming IPv6 packet (size: {}) into {} fragments with tag {}".format(
            original_length, len(fragment_list), self.datagram_tag - 1))
        return fragment_list
| StarcoderdataPython |
25653 | <reponame>ejkim1996/Unity-JSON-Manager<filename>JSONFormatter.py
import json
from tkinter import Tk
from tkinter.filedialog import askopenfilename

# Python script that allows user to select JSON file using TKinter and format
# it properly (3-space indent, sorted keys), rewriting the file in place.
root = Tk()
filename = askopenfilename()
root.destroy()  # Close the window

# Parse first so a malformed file raises before the file is truncated for
# writing; context managers guarantee both handles are closed.
with open(filename, 'r') as read:
    parsed = json.load(read)

with open(filename, 'w') as write:
    json.dump(parsed, write, indent=3, sort_keys=True)
| StarcoderdataPython |
3538616 | <gh_stars>1-10
import argparse
import datetime
import re
import validators
from tzlocal import get_localzone
import pnc_cli.common as common
import pnc_cli.utils as utils
from pnc_cli.pnc_api import pnc_api
import requests
bc_name_regex = "^[a-zA-Z0-9_.][a-zA-Z0-9_.-]*(?!\.git)+$"

# Compiled once at import time instead of on every validation call.
_bc_name_pattern = re.compile(bc_name_regex)


# BuildConfiguration Types
def valid_bc_name(name_input):
    """argparse type: accept *name_input* iff it matches bc_name_regex.

    Raises argparse.ArgumentTypeError on invalid characters.
    """
    if not _bc_name_pattern.match(name_input):
        raise argparse.ArgumentTypeError("name contains invalid characters")
    return name_input
def unique_bc_name(name_input):
    """argparse type: a syntactically valid BuildConfiguration name not yet in use in PNC."""
    valid_bc_name(name_input)
    if common.get_id_by_name(pnc_api.build_configs, name_input):
        raise argparse.ArgumentTypeError("BuildConfiguration name '{}' is already in use".format(name_input))
    return name_input
def valid_unique_bc_name(name_input):
    """argparse type: alias of unique_bc_name (which already re-validates the syntax)."""
    unique_bc_name(valid_bc_name(name_input))
    return name_input
def existing_bc_name(name_input):
    """argparse type: a valid name belonging to an existing BuildConfiguration."""
    valid_bc_name(name_input)
    if not common.get_id_by_name(pnc_api.build_configs, name_input):
        raise argparse.ArgumentTypeError("no BuildConfiguration with the name {} exists".format(name_input))
    return name_input
def existing_bc_id(id_input):
    """argparse type: numeric ID (checked by valid_id, defined elsewhere in this module) of an existing BuildConfiguration."""
    valid_id(id_input)
    if not common.id_exists(pnc_api.build_configs, id_input):
        raise argparse.ArgumentTypeError("no BuildConfiguration with ID {} exists".format(id_input))
    return id_input
# RepositoryConfiguration Types
def existing_rc_id(id_input):
    """argparse type: ID of an existing RepositoryConfiguration."""
    valid_id(id_input)
    if not common.id_exists(pnc_api.repositories, id_input):
        raise argparse.ArgumentTypeError("no RepositoryConfiguration with ID {} exists".format(id_input))
    return id_input
# Product Types
def existing_product_id(id_input):
    """argparse type: ID of an existing Product."""
    valid_id(id_input)
    if not common.id_exists(pnc_api.products, id_input):
        raise argparse.ArgumentTypeError("no Product with ID {} exists".format(id_input))
    return id_input
def existing_product_name(name_input):
    """argparse type: name of an existing Product."""
    if not common.get_id_by_name(pnc_api.products, name_input):
        raise argparse.ArgumentTypeError("no Product with the name {} exists".format(name_input))
    return name_input
def unique_product_name(name_input):
    """argparse type: a Product name not yet in use."""
    if common.get_id_by_name(pnc_api.products, name_input):
        raise argparse.ArgumentTypeError("a Product with the name {} already exists".format(name_input))
    return name_input
def valid_abbreviation(abbreviation_input):
    """argparse type: a Product abbreviation of at most 20 characters.

    Bugfix: the old check also tested ``len(...) < 0``, which is never true
    for a string; the effective contract (0..20 chars) is unchanged.
    """
    if len(abbreviation_input) > 20:
        raise argparse.ArgumentTypeError("a Product abbreviation must be between 0 and 20 characters")
    return abbreviation_input
def unique_product_abbreviation(abbreviation_input):
    """argparse type: a valid Product abbreviation not yet in use."""
    valid_abbreviation(abbreviation_input)
    if pnc_api.products.get_all(q='abbreviation==' + abbreviation_input).content:
        raise argparse.ArgumentTypeError("a Product with the abbreviation {} already exists".format(abbreviation_input))
    return abbreviation_input
# ProductVersion types
def existing_product_version(id_input):
    """argparse type: ID of an existing ProductVersion."""
    valid_id(id_input)
    if not common.id_exists(pnc_api.product_versions, id_input):
        raise argparse.ArgumentTypeError("no ProductVersion with ID {} exists".format(id_input))
    return id_input
def valid_version_two_digits(version):
    """argparse type: version of the form "<major>.<minor>" (e.g. "1.0")."""
    if not utils.is_valid_version(version, '^\d+\.\d+'):
        raise argparse.ArgumentTypeError("Version should consist of two numeric parts separated by a dot.")
    return version
# ProductMilestone types
def existing_product_milestone(id_input):
    """argparse type: ID of an existing ProductMilestone."""
    valid_id(id_input)
    if not common.id_exists(pnc_api.product_milestones, id_input):
        raise argparse.ArgumentTypeError("no ProductMilestone with ID {} exist".format(id_input))
    return id_input
def valid_version_create(version):
    """argparse type: milestone version "<number>.<qualifier>" (e.g. "1.ER1")."""
    if not utils.is_valid_version(version, '^\d+\.\w+$'):
        raise argparse.ArgumentTypeError(
            "Version must start with a number, followed by a dot and then a qualifier (e.g ER1).")
    return version
def valid_version_update(version):
    """argparse type: full version "<x>.<y>.<z>.<qualifier>" (e.g. "1.0.0.ER1")."""
    if not utils.is_valid_version(version, '^\d+\.\d+\.\d+\.\w+$'):
        raise argparse.ArgumentTypeError(
            "The version should consist of three numeric parts and one alphanumeric qualifier each separated by a dot.")
    return version
# ProductRelease types
def existing_product_release(id_input):
    """Argparse type: a numeric ID that refers to an existing ProductRelease."""
    valid_id(id_input)
    if common.id_exists(pnc_api.product_releases, id_input):
        return id_input
    raise argparse.ArgumentTypeError("no ProductRelease with ID {} exists.".format(id_input))
# BuildConfigurationSet types
def unique_bc_set_name(name_input):
    """Argparse type: a BuildConfigurationSet name that is not yet in use."""
    if not common.get_id_by_name(pnc_api.build_group_configs, name_input):
        return name_input
    raise argparse.ArgumentTypeError("BuildConfigurationSet name '{}' is already in use".format(name_input))


def existing_bc_set_name(name_input):
    """Argparse type: the name of an existing BuildConfigurationSet."""
    if common.get_id_by_name(pnc_api.build_group_configs, name_input):
        return name_input
    raise argparse.ArgumentTypeError("no BuildConfigurationSet with the name {} exists".format(name_input))


def existing_bc_set_id(id_input):
    """Argparse type: a numeric ID of an existing BuildConfigurationSet."""
    valid_id(id_input)
    if common.id_exists(pnc_api.build_group_configs, id_input):
        return id_input
    raise argparse.ArgumentTypeError("no BuildConfigurationSet with ID {} exists".format(id_input))
# BuildEnvironmentTypes
def existing_environment_id(id_input):
    """Argparse type: a numeric ID of an existing BuildEnvironment."""
    valid_id(id_input)
    if not common.id_exists(pnc_api.environments, id_input):
        raise argparse.ArgumentTypeError("no BuildEnvironment exists with id {}".format(id_input))
    return id_input


def existing_environment_name(name_input):
    """Argparse type: the name of an existing BuildEnvironment."""
    if not common.get_id_by_name(pnc_api.environments, name_input):
        raise argparse.ArgumentTypeError("no BuildEnvironment exists with name {}".format(name_input))
    return name_input


def unique_environment_name(name_input):
    """Argparse type: a BuildEnvironment name that is not yet taken.

    Parameter renamed from camelCase ``nameInput`` to snake_case for
    consistency with every other validator in this module (the callable is
    used positionally as an argparse ``type=``).
    """
    if common.get_id_by_name(pnc_api.environments, name_input):
        raise argparse.ArgumentTypeError("a BuildEnvironment with name {} already exists".format(name_input))
    return name_input
# Project Types
def existing_project_id(id_input):
    """Argparse type: a numeric ID of an existing Project."""
    valid_id(id_input)
    if common.id_exists(pnc_api.projects, id_input):
        return id_input
    raise argparse.ArgumentTypeError("no Project with ID {} exists".format(id_input))


def existing_project_name(name_input):
    """Argparse type: the name of an existing Project."""
    if common.get_id_by_name(pnc_api.projects, name_input):
        return name_input
    raise argparse.ArgumentTypeError("no Project with name {} exists".format(name_input))


def unique_project_name(name_input):
    """Argparse type: a Project name that is not yet taken."""
    if not common.get_id_by_name(pnc_api.projects, name_input):
        return name_input
    raise argparse.ArgumentTypeError("a Project with name {} already exists".format(name_input))
# BuildConfigurationSetRecord types
def existing_bc_set_record(id_input):
    """Argparse type: a numeric ID of an existing BuildConfigurationSetRecord."""
    valid_id(id_input)
    if common.id_exists(pnc_api.build_groups, id_input):
        return id_input
    raise argparse.ArgumentTypeError("no BuildConfigurationSetRecord with ID {} exists".format(id_input))


# BuildRecord types
def existing_build_record(id_input):
    """Argparse type: a numeric ID of an existing BuildRecord."""
    valid_id(id_input)
    if common.id_exists(pnc_api.builds, id_input):
        return id_input
    raise argparse.ArgumentTypeError("no BuildRecord with ID {} exists".format(id_input))
def existing_built_artifact(id_input):
    # TODO: validation for built artifacts is not implemented yet; any value
    # is currently accepted (and None is returned instead of the input).
    pass


# License types
def existing_license(id_input):
    # Argparse type: a numeric ID that refers to an existing License.
    valid_id(id_input)
    if not common.id_exists(pnc_api.licenses, id_input):
        raise argparse.ArgumentTypeError("no License with ID {} exists".format(id_input))
    return id_input
# Running build records types
def existing_running_build(id_input):
    """Argparse type: a numeric ID of an existing RunningBuild."""
    valid_id(id_input)
    if common.id_exists(pnc_api.running_builds, id_input):
        return id_input
    raise argparse.ArgumentTypeError("no RunningBuild with ID {} exists".format(id_input))
# Misc types
def valid_date(date_input):
    """Argparse type: parse a 'yyyy-mm-dd' string into a timezone-localized datetime.

    Local name changed from camelCase ``dateInput`` to snake_case for
    consistency with the rest of the module.
    """
    try:
        date_input = get_localzone().localize(datetime.datetime.strptime(date_input, '%Y-%m-%d'))
    except ValueError:
        raise argparse.ArgumentTypeError("Date format: yyyy-mm-dd")
    return date_input


def valid_id(id_input):
    """Argparse type: a string consisting only of digits (a positive integer ID)."""
    if not id_input.isdigit():
        raise argparse.ArgumentTypeError("An ID must be a positive integer")
    return id_input


def valid_url(url_input):
    """Argparse type: a syntactically valid URL."""
    if not validators.url(url_input):
        raise argparse.ArgumentTypeError("Invalid url")
    return url_input


def valid_git_url(url_input):
    """Argparse type: a git URL; git/ssh schemes are validated as http URLs."""
    # Replace git protocol with http so we can validate it as url
    valid_url(re.sub(r'^(git\+ssh|ssh|git)', 'http', url_input))
    return url_input
def t_or_f(arg):
    """Argparse type: parse a boolean flag value.

    Accepts any case-insensitive, non-empty prefix of 'true' or 'false'
    (e.g. 't', 'Tr', 'FALSE'). Bug fix: the original accepted the empty
    string as True, because 'TRUE'.startswith('') is trivially True; an
    empty value is now rejected like any other invalid input.
    """
    ua = str(arg).upper()
    if ua and 'TRUE'.startswith(ua):
        return True
    if ua and 'FALSE'.startswith(ua):
        return False
    raise argparse.ArgumentTypeError("Only true or false is possible.")
| StarcoderdataPython |
5023421 | from _pytest.config import Config
from _pytest.monkeypatch import MonkeyPatch
from infrastructure.utility.plugin_utilities import can_plugin_be_registered
def test_plugin_register_help(pytestconfig: Config, monkeypatch: MonkeyPatch) -> None:
    """The plugin must not register when pytest was invoked with --help."""
    monkeypatch.setattr(pytestconfig.option, "help", True)
    assert not can_plugin_be_registered(pytestconfig)


def test_plugin_register_collect(
    pytestconfig: Config, monkeypatch: MonkeyPatch
) -> None:
    """The plugin must not register during a --collect-only run."""
    monkeypatch.setattr(pytestconfig.option, "collectonly", True)
    assert not can_plugin_be_registered(pytestconfig)


def test_plugin_register_xdist(pytestconfig: Config, monkeypatch: MonkeyPatch) -> None:
    """The plugin must not register on a pytest-xdist worker.

    Workers are detected by the presence of a ``workerinput`` attribute.
    Fix: the original assigned the attribute directly (leaking it into any
    later test using the same config and leaving ``monkeypatch`` unused);
    monkeypatch undoes the attribute after the test. ``raising=False``
    because the attribute does not exist beforehand.
    """
    monkeypatch.setattr(pytestconfig, "workerinput", [], raising=False)
    assert not can_plugin_be_registered(pytestconfig)
| StarcoderdataPython |
11237740 | <reponame>rec/leds<filename>test/bibliopixel/project/make.py<gh_stars>100-1000
import tempfile
from unittest.mock import patch
from bibliopixel.project import project
from bibliopixel.util import data_file
from .. mark_tests import SKIP_LONG_TESTS
def make_project(data):
    """Build a bibliopixel project from `data`.

    `data` may be a dict (used directly as the project description), a string
    of serialized project data (staged in a temp file, then loaded), or a
    filename to load. Anything else raises ValueError.
    """
    if isinstance(data, dict):
        desc = data
        name = None
    elif not isinstance(data, str):
        raise ValueError('Cannot understand data %s' % data)
    else:
        if '{' in data or ':' in data:
            # '{' or ':' suggests inline serialized data rather than a filename:
            # stage it in a named temp file so data_file.load can read it.
            # NOTE(review): `fp` must stay alive until the load below runs —
            # NamedTemporaryFile deletes the file once `fp` is collected.
            fp = tempfile.NamedTemporaryFile(mode='w')
            fp.write(data)
            fp.seek(0)  # seek flushes the write buffer to disk before loading
            name = fp.name
        else:
            name = data
        desc = data_file.load(name)
    return project.project(desc, root_file=name)
def make(data, run_start=not SKIP_LONG_TESTS):
    """Create a project from `data` and return its animation.

    When `run_start` is true, the animation is started once with `time.sleep`
    patched out so the call returns immediately.
    """
    # Local renamed from `project` to avoid shadowing the imported module.
    built = make_project(data)
    if run_start:
        with patch('time.sleep', autospec=True):
            built.animation.start()
    return built.animation
| StarcoderdataPython |
11283346 | <reponame>ipussy/ipussy.github.io
#!/usr/bin/env python3
import smtplib
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def sendReportEmail(subject, message):
    """Send a plain-text report email via Gmail SMTP.

    :param subject: subject line of the email
    :param message: plain-text body of the email

    Improvements over the original: the ``message`` parameter is no longer
    rebound to the MIME object (it used to shadow the body text), the SMTP
    session is closed even if sending fails, and dead commented-out code
    was removed.
    """
    # The mail addresses and password (placeholders substituted at build time).
    senderAddress = '<EMAIL>'
    senderPass = '<PASSWORD>'
    receiverAddress = '<EMAIL>'

    # Setup the MIME envelope with the body attached as plain text.
    mime_message = MIMEMultipart()
    mime_message['From'] = senderAddress
    mime_message['To'] = receiverAddress
    mime_message['Subject'] = subject
    mime_message.attach(MIMEText(message, 'plain'))

    # Create SMTP session for sending the mail; always close the session.
    session = smtplib.SMTP('smtp.gmail.com', 587)  # use gmail with port
    try:
        session.starttls()  # enable security
        session.login(senderAddress, senderPass)  # login with mail_id and password
        session.sendmail(senderAddress, receiverAddress, mime_message.as_string())
    finally:
        session.quit()
    print('Mail Sent.')
3369488 | import time
from PIL import Image, ImageDraw
from genetic import Myimage, ImagePopulation
if __name__ == "__main__":
    # (1+1)-style evolutionary loop: repeatedly mutate a copy of the current
    # population and keep the mutant only if it matches the reference image
    # better (lower fitness). NOTE: this file is Python 2 (print statements).
    current_generation = 0
    current_evolve = 0
    start_time = time.time()
    refer_image = Myimage.from_file(filename="sample.png")
    ImagePopulation.REFRENCE_IMAGE = refer_image
    initial_population = ImagePopulation()
    initial_population.random_population()
    initial_population.update_image()
    while 1:
        new_population = initial_population.copy()
        new_population.mutate()
        if new_population.dirty:
            # print "new_population.dirty:%s"%new_population.dirty
            current_generation += 1
            # fintness =
            new_population.update_image()
            ## calculate the fitness
            print "new_population.fitness:%s"%new_population.fitness
            if new_population.fitness < initial_population.fitness:
                ## need
                # Improvement found: adopt the mutated population as the new base.
                current_evolve += 1
                print "current_evolve:%s"%current_evolve
                initial_population = new_population
                if current_evolve % 100 == 0:
                    # Snapshot progress every 100 accepted improvements.
                    new_population.save_image(name="%s.jpg"%current_evolve)
| StarcoderdataPython |
4900359 | # Generated by Django 3.2.7 on 2021-10-14 17:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the `date` and `organizer_email` fields to the Meetup model.
    # The one-off defaults exist only to populate pre-existing rows;
    # `preserve_default=False` drops them from the schema afterwards.

    dependencies = [
        ('meetup', '0007_auto_20211014_2234'),
    ]

    operations = [
        migrations.AddField(
            model_name='meetup',
            name='date',
            field=models.DateField(default='2021-04-12'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='meetup',
            name='organizer_email',
            field=models.EmailField(default='<EMAIL>', max_length=254),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
11332815 | <filename>hs_tools_resource/migrations/0013_auto_20171023_1724.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Metadata-only migration: changes the ToolMetadata model's admin display
    # names to "Application Approval(s)"; no database schema change.

    dependencies = [
        ('hs_tools_resource', '0012_toolmetadata_approved'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='toolmetadata',
            options={'verbose_name': 'Application Approval', 'verbose_name_plural': 'Application Approvals'},
        ),
    ]
| StarcoderdataPython |
1614426 | <filename>Chapter 10/cellphone.py
class CellPhone:
    """Holds data about a cell phone: manufacturer, model number and retail price."""

    def __init__(self, manufact, model, price):
        """Initialize the phone with a manufacturer, model number and retail price."""
        self._manufact = manufact
        self._model = model
        self._retail_price = price

    def set_manufact(self, manufact):
        """Change the phone's manufacturer."""
        self._manufact = manufact

    def set_model(self, model):
        """Change the phone's model number."""
        self._model = model

    def set_retail_price(self, price):
        """Change the phone's retail price."""
        self._retail_price = price

    def get_manufact(self):
        """Return the phone's manufacturer."""
        return self._manufact

    def get_model(self):
        """Return the phone's model number."""
        return self._model

    def get_retail_price(self):
        """Return the phone's retail price."""
        return self._retail_price
| StarcoderdataPython |
231828 | import unittest
from unittest.mock import patch, Mock
import os
from . import core
class TestGetFileExtension(unittest.TestCase):
    """get_file_extension must return the suffix (without the dot) both for
    bare filenames and for filenames nested inside a directory path."""

    def setUp(self):
        self._path = os.path.join("this", "is", "a", "path")

    def _assert_ext(self, filename, expected):
        # Shared helper: one equality assertion per case, as before.
        self.assertEqual(core.get_file_extension(filename), expected)

    def test_exe(self):
        self._assert_ext("main.exe", 'exe')

    def test_shell(self):
        self._assert_ext("script.sh", 'sh')

    def test_noext(self):
        self._assert_ext("program", '')

    def test_py(self):
        self._assert_ext("mymodule.py", 'py')

    def test_exe_in_path(self):
        self._assert_ext(os.path.join(self._path, "main.exe"), 'exe')

    def test_shell_in_path(self):
        self._assert_ext(os.path.join(self._path, "script.sh"), 'sh')

    def test_noext_in_path(self):
        self._assert_ext(os.path.join(self._path, "program"), '')

    def test_py_in_path(self):
        self._assert_ext(os.path.join(self._path, "mymodule.py"), 'py')
class TestIsExecutable(unittest.TestCase):
    """is_executable must delegate straight to os.access with the X_OK flag."""

    @patch("sutils.applications.process.core.os.access")
    def test_calls_os_access(self, mock_access):
        core.is_executable("filename")
        mock_access.assert_called_once_with("filename", os.X_OK)

    @patch("sutils.applications.process.core.os.access")
    def test_returns_os_access(self, mock_access):
        sentinel = "this is the return value"
        mock_access.return_value = sentinel
        self.assertEqual(core.is_executable("filename"), sentinel)
class TestRunProcess(unittest.TestCase):
    # NOTE: stacked @patch decorators are applied bottom-up, so the mock
    # parameters are bound in reverse decorator order (the lowest decorator
    # corresponds to the first mock argument).

    @patch("sutils.applications.process.core.get_file_extension")
    @patch("sutils.applications.process.core.is_executable", Mock())
    @patch("sutils.processing.process.get_processor", Mock())
    def test_calls_get_file_extension(self, mock_get_file_extension):
        # run_process must look up the target's file extension exactly once.
        core.run_process("filename")
        mock_get_file_extension.assert_called_once_with("filename")

    @patch("sutils.applications.process.core.get_file_extension")
    @patch("sutils.applications.process.core.is_executable")
    @patch("sutils.processing.process.get_processor", Mock())
    def test_calls_is_executable(self, mock_is_executable, mock_get_file_extension):
        # run_process must check whether the target file is executable.
        core.run_process("filename")
        mock_is_executable.assert_called_once_with("filename")

    @patch("sutils.applications.process.core.is_executable")
    @patch("sutils.processing.process.get_processor")
    def test_calls_get_processor_exec(self, mock_get_processor, mock_is_executable):
        # Executable files are dispatched to the 'external' processor.
        mock_is_executable.return_value = True
        core.run_process("filename")
        mock_get_processor.assert_called_once_with("external", "filename", root_path='.', config_file='config.ini')

    @patch("sutils.applications.process.core.is_executable")
    @patch("sutils.processing.process.get_processor")
    def test_calls_get_processor_noexec_shell(self, mock_get_processor, mock_is_executable):
        # Non-executable .sh files are dispatched to the 'bash' processor.
        mock_is_executable.return_value = False
        core.run_process("filename.sh")
        mock_get_processor.assert_called_once_with("bash", "filename.sh", root_path='.', config_file='config.ini')

    @patch("sutils.applications.process.core.is_executable")
    @patch("sutils.processing.process.get_processor")
    def test_calls_get_processor_noexec_py(self, mock_get_processor, mock_is_executable):
        # Non-executable .py files are dispatched to the 'python.script' processor.
        mock_is_executable.return_value = False
        core.run_process("filename.py")
        mock_get_processor.assert_called_once_with("python.script", "filename.py", root_path='.', config_file='config.ini')

    @patch("sutils.processing.process.get_processor")
    def test_calls_process_on_processor(self, mock_get_processor):
        # The processor returned by get_processor must be run exactly once.
        core.run_process("filename.py")
        mock_get_processor.return_value.run.assert_called_once_with()
| StarcoderdataPython |
131129 | <filename>rusel/base/dir_forms.py
from django import forms
class UploadForm(forms.Form):
    """Minimal form exposing a single required file-upload field."""
    upload = forms.FileField()
| StarcoderdataPython |
5042936 | <reponame>e-k-m/thingstodo
import os
import flask
def main():
    # Point the flask CLI at the `thingstodo` package, then hand over control.
    os.environ["FLASK_APP"] = "thingstodo"
    flask.cli.main()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
60124 | <reponame>lipovsek/pytea<gh_stars>0
# This sample tests the type checker's "type var scoring" mechanism
# whereby it attempts to solve type variables with the simplest
# possible solution.
from typing import Union, List, TypeVar, Type
T = TypeVar("T")


def to_list1(obj_type: Type[T], obj: Union[List[T], T]) -> List[T]:
    # Stub body: only the signature matters for this type-checker sample.
    return []


def to_list2(obj_type: Type[T], obj: Union[T, List[T]]) -> List[T]:
    # Same as to_list1, but with the union elements in the opposite order.
    return []


input_list: List[str] = ["string"]

# The expression on the RHS can satisfy the type variable T
# with either the type str or Union[List[str], str]. It should
# pick the simpler of the two.
output_list1 = to_list1(str, input_list)
verify_type1: List[str] = output_list1

# The resulting type should not depend on the order of the union
# elements.
output_list2 = to_list2(str, input_list)
verify_type2: List[str] = output_list2
| StarcoderdataPython |
210217 | <reponame>abivilion/Hackerank-Solutions-
def print_formatted(number):
    """Print 1..number, one row each, as decimal, octal, upper-case hex and
    binary columns, each right-aligned to the width of `number` in binary."""
    width = len(bin(number)) - 2  # number of digits in the binary form
    for value in range(1, number + 1):
        print('{0:>{w}d} {0:>{w}o} {0:>{w}X} {0:>{w}b}'.format(value, w=width))
| StarcoderdataPython |
6425838 | <gh_stars>0
import config
import os
from flask import Flask
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy

# App instantiation: settings class is selected via the APP_SETTINGS env var.
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)

# enable CORS
# NOTE(review): origins are wide open ('*') — fine for development, confirm
# this is intended for production.
CORS(app, resources={r'/*': {'origins': '*'}})

# Heroku check
ON_HEROKU = 'ON_HEROKU' in os.environ

# Need to add to Procfile and only import clock when not on heroku for debugging
if not ON_HEROKU:
    from server import clock

from server import routes
8049189 | """
An auto-scaling service specific to Assemblyline services.
"""
import threading
from collections import defaultdict
from string import Template
from typing import Dict, List
import os
import math
import platform
import time
import sched
from assemblyline.remote.datatypes.queues.named import NamedQueue
from assemblyline.remote.datatypes.queues.priority import PriorityQueue
from assemblyline.common.forge import get_service_queue
from assemblyline.remote.datatypes.exporting_counter import export_metrics_once
from assemblyline.odm.messages.scaler_heartbeat import Metrics
from assemblyline.odm.messages.scaler_status_heartbeat import Status
from assemblyline.remote.datatypes.hash import ExpiringHash
from assemblyline.common.constants import SCALER_TIMEOUT_QUEUE, SERVICE_STATE_HASH, ServiceStatus
from assemblyline.odm.models.service import Service, DockerConfig
from assemblyline_core.scaler.controllers import KubernetesController
from assemblyline_core.scaler.controllers.interface import ServiceControlError
from assemblyline_core.server_base import CoreBase, ServiceStage
from .controllers import DockerController
from . import collection
# How often (in seconds) to download new service data, try to scale managed services,
# and download more metrics data respectively
SERVICE_SYNC_INTERVAL = 30
PROCESS_TIMEOUT_INTERVAL = 30
SCALE_INTERVAL = 5
METRIC_SYNC_INTERVAL = 0.5
SERVICE_STATUS_FLUSH = 5
CONTAINER_EVENTS_LOG_INTERVAL = 2
HEARTBEAT_INTERVAL = 5

# How many times to let a service generate an error in this module before we disable it.
# This is only for analysis services, core services we keep retrying forever
MAXIMUM_SERVICE_ERRORS = 5
ERROR_EXPIRY_INTERVAL = 60*60  # how long we wait before we forgive an error. (seconds)

# An environment variable that should be set when we are started with kubernetes, tells us how to attach
# the global Assemblyline config to new things that we launch.
KUBERNETES_AL_CONFIG = os.environ.get('KUBERNETES_AL_CONFIG')

# Deployment identity and optional classification-config mount points,
# all read from the environment at startup.
HOSTNAME = os.getenv('HOSTNAME', platform.node())
NAMESPACE = os.getenv('NAMESPACE', 'al')
CLASSIFICATION_HOST_PATH = os.getenv('CLASSIFICATION_HOST_PATH', None)
CLASSIFICATION_CONFIGMAP = os.getenv('CLASSIFICATION_CONFIGMAP', None)
CLASSIFICATION_CONFIGMAP_KEY = os.getenv('CLASSIFICATION_CONFIGMAP_KEY', 'classification.yml')
class ServiceProfile:
    """A profile, describing a currently running service.
    This includes how the service should be run, and conditions related to the scaling of the service.
    """

    def __init__(self, name, container_config: DockerConfig, config_hash=0, min_instances=0, max_instances=None,
                 growth=600, shrink=None, backlog=500, queue=None, shutdown_seconds=30):
        """
        :param name: Name of the service to manage
        :param container_config: Instructions on how to start this service
        :param config_hash: Hash of service settings not captured by the docker
                            config; a change triggers a service restart
        :param min_instances: The minimum number of copies of this service keep running
        :param max_instances: The maximum number of copies permitted to be running
        :param growth: Delay before growing a service, unit-less, approximately seconds
        :param shrink: Delay before shrinking a service, unit-less, approximately seconds, defaults to -growth
        :param backlog: How long a queue backlog should be before it takes `growth` seconds to grow.
        :param queue: Queue name for monitoring
        :param shutdown_seconds: Grace period given to a container to stop
        """
        self.name = name
        self.queue: PriorityQueue = queue
        self.container_config = container_config
        self.target_duty_cycle = 0.9  # fraction of time instances should ideally be busy
        self.shutdown_seconds = shutdown_seconds
        self.config_hash = config_hash

        # How many instances we want, and can have
        self.min_instances = self._min_instances = max(0, int(min_instances))
        self._max_instances = max(0, int(max_instances)) if max_instances else float('inf')
        self.desired_instances: int = 0
        self.running_instances: int = 0

        # Information tracking when we want to grow/shrink
        self.pressure: float = 0
        self.growth_threshold = abs(float(growth))
        self.shrink_threshold = -self.growth_threshold/2 if shrink is None else -abs(float(shrink))
        self.leak_rate: float = 0.1  # how quickly pressure decays back toward zero per time unit

        # How long does a backlog need to be before we are concerned
        self.backlog = int(backlog)
        self.queue_length = 0
        self.duty_cycle = 0
        self.last_update = 0

    @property
    def cpu(self):
        """CPU cores requested per instance."""
        return self.container_config.cpu_cores

    @property
    def ram(self):
        """RAM (in MB) requested per instance."""
        return self.container_config.ram_mb

    @property
    def instance_limit(self):
        """Configured hard cap on instances; 0 means unlimited."""
        if self._max_instances == float('inf'):
            return 0
        return self._max_instances

    @property
    def max_instances(self):
        """Effective cap used while scaling."""
        # Adjust the max_instances based on the number that is already running
        # this keeps the scaler from running way ahead with its demands when resource caps are reached
        return min(self._max_instances, self.running_instances + 2)

    def update(self, delta, instances, backlog, duty_cycle):
        """Fold fresh metrics into the profile and recompute the desired instance count.

        :param delta: Seconds elapsed since the last update
        :param instances: Number of instances currently running
        :param backlog: Current length of the service's input queue
        :param duty_cycle: Observed fraction of time the instances were busy
        """
        self.last_update = time.time()
        self.running_instances = instances
        self.queue_length = backlog
        self.duty_cycle = duty_cycle

        # Adjust min instances based on queue (if something has min_instances == 0, bump it up to 1 when
        # there is anything in the queue) this should have no effect for min_instances > 0
        self.min_instances = max(self._min_instances, int(bool(backlog)))
        self.desired_instances = max(self.min_instances, min(self.max_instances, self.desired_instances))

        # Should we scale up because of backlog
        self.pressure += delta * math.sqrt(backlog/self.backlog)

        # Should we scale down due to duty cycle? (are some of the workers idle)
        self.pressure -= delta * (self.target_duty_cycle - duty_cycle)/self.target_duty_cycle

        # Apply the friction, tendency to do nothing, move the change pressure gradually to the center.
        leak = min(self.leak_rate * delta, abs(self.pressure))
        self.pressure = math.copysign(abs(self.pressure) - leak, self.pressure)

        # When we are already at the minimum number of instances, don't let negative values build up
        # otherwise this can cause irregularities in scaling response around the min_instances
        if self.desired_instances == self.min_instances:
            self.pressure = max(0.0, self.pressure)

        # Crossing either threshold changes the target by one and resets pressure.
        if self.pressure >= self.growth_threshold:
            self.desired_instances = min(self.max_instances, self.desired_instances + 1)
            self.pressure = 0

        if self.pressure <= self.shrink_threshold:
            self.desired_instances = max(self.min_instances, self.desired_instances - 1)
            self.pressure = 0
class ScalerServer(CoreBase):
    def __init__(self, config=None, datastore=None, redis=None, redis_persist=None):
        super().__init__('assemblyline.scaler', config=config, datastore=datastore,
                         redis=redis, redis_persist=redis_persist)

        self.scaler_timeout_queue = NamedQueue(SCALER_TIMEOUT_QUEUE, host=self.redis_persist)
        # Per-service tally of recent errors; a service is disabled once it
        # reaches MAXIMUM_SERVICE_ERRORS (see handle_service_error).
        self.error_count = {}
        # Shared, auto-expiring map of per-host service status heartbeats.
        self.status_table = ExpiringHash(SERVICE_STATE_HASH, host=self.redis, ttl=30*60)

        labels = {
            'app': 'assemblyline',
            'section': 'service',
        }

        if KUBERNETES_AL_CONFIG:
            # Running inside Kubernetes: manage services through the cluster API.
            self.log.info(f"Loading Kubernetes cluster interface on namespace: {NAMESPACE}")
            self.controller = KubernetesController(logger=self.log, prefix='alsvc_', labels=labels,
                                                   namespace=NAMESPACE, priority='al-service-priority')
            # If we know where to find it, mount the classification into the service containers
            if CLASSIFICATION_CONFIGMAP:
                self.controller.config_mount('classification-config', config_map=CLASSIFICATION_CONFIGMAP,
                                             key=CLASSIFICATION_CONFIGMAP_KEY,
                                             target_path='/etc/assemblyline/classification.yml')
        else:
            # Otherwise manage containers through the local docker daemon.
            self.log.info("Loading Docker cluster interface.")
            self.controller = DockerController(logger=self.log, prefix=NAMESPACE,
                                               cpu_overallocation=self.config.core.scaler.cpu_overallocation,
                                               memory_overallocation=self.config.core.scaler.memory_overallocation,
                                               labels=labels)
            # If we know where to find it, mount the classification into the service containers
            if CLASSIFICATION_HOST_PATH:
                self.controller.global_mounts.append((CLASSIFICATION_HOST_PATH, '/etc/assemblyline/classification.yml'))

        # Profiles of all services currently being scaled, keyed by name.
        self.profiles: Dict[str, ServiceProfile] = {}

        # Prepare a single threaded scheduler
        self.state = collection.Collection(period=self.config.core.metrics.export_interval)
        self.scheduler = sched.scheduler()
        self.scheduler_stopped = threading.Event()
def add_service(self, profile: ServiceProfile):
profile.desired_instances = max(self.controller.get_target(profile.name), profile.min_instances)
profile.running_instances = profile.desired_instances
self.log.debug(f'Starting service {profile.name} with a target of {profile.desired_instances}')
profile.last_update = time.time()
self.profiles[profile.name] = profile
self.controller.add_profile(profile)
    def try_run(self):
        """Main loop: run each periodic job once (they re-register themselves
        with the scheduler), then drive the scheduler until stopped."""
        # Do an initial call to the main methods, who will then be registered with the scheduler
        self.sync_services()
        self.sync_metrics()
        self.update_scaling()
        self.expire_errors()
        self.process_timeouts()
        self.export_metrics()
        self.flush_service_status()
        self.log_container_events()
        self.heartbeat()

        # Run as long as we need to
        while self.running:
            # run(False) executes due events and returns the delay to the next one.
            delay = self.scheduler.run(False)
            time.sleep(min(delay, 2))
        self.scheduler_stopped.set()
    def stop(self):
        """Shut down: stop the base service, give the scheduler loop up to
        five seconds to drain, then stop the container controller."""
        super().stop()
        self.scheduler_stopped.wait(5)
        self.controller.stop()
    def heartbeat(self):
        """Periodically touch a file on disk.

        Since tasks are run serially, the delay between touches will be the maximum of
        HEARTBEAT_INTERVAL and the longest running task.
        """
        # NOTE(review): only re-registers (and touches) when a heartbeat file
        # is configured -- confirm super().heartbeat() belongs inside the if.
        if self.config.logging.heartbeat_file:
            self.scheduler.enter(HEARTBEAT_INTERVAL, 0, self.heartbeat)
            super().heartbeat()
    def sync_services(self):
        """Reconcile running deployments against the service database.

        Re-registers itself to run every SERVICE_SYNC_INTERVAL seconds.
        Enables/updates deployments for enabled services, tears down
        disabled ones, and removes services no longer in the database.
        """
        self.scheduler.enter(SERVICE_SYNC_INTERVAL, 0, self.sync_services)
        default_settings = self.config.core.scaler.service_defaults
        image_variables = defaultdict(str)
        image_variables.update(self.config.services.image_variables)
        current_services = set(self.profiles.keys())
        discovered_services = []

        # Get all the service data
        for service in self.datastore.list_all_services(full=True):
            service: Service = service
            name = service.name
            stage = self.get_service_stage(service.name)
            discovered_services.append(name)

            # noinspection PyBroadException
            try:
                if service.enabled and stage == ServiceStage.Off:
                    # Enable this service's dependencies
                    self.controller.prepare_network(service.name, service.docker_config.allow_internet_access)
                    for _n, dependency in service.dependencies.items():
                        self.controller.start_stateful_container(
                            service_name=service.name,
                            container_name=_n,
                            spec=dependency,
                            labels={'dependency_for': service.name}
                        )

                    # Move to the next service stage
                    if service.update_config and service.update_config.wait_for_update:
                        self._service_stage_hash.set(name, ServiceStage.Update)
                    else:
                        self._service_stage_hash.set(name, ServiceStage.Running)

                if not service.enabled:
                    self.stop_service(service.name, stage)
                    continue

                # Check that all enabled services are enabled
                if service.enabled and stage == ServiceStage.Running:
                    # Compute a hash of service properties not include in the docker config, that
                    # should still result in a service being restarted when changed
                    config_hash = hash(str(sorted(service.config.items())))
                    config_hash = hash((config_hash, str(service.submission_params)))

                    # Build the docker config for the service, we are going to either create it or
                    # update it so we need to know what the current configuration is either way
                    docker_config = service.docker_config
                    docker_config.image = Template(docker_config.image).safe_substitute(image_variables)
                    # Fill in any default environment variables the service
                    # configuration did not override.
                    set_keys = set(var.name for var in docker_config.environment)
                    for var in default_settings.environment:
                        if var.name not in set_keys:
                            docker_config.environment.append(var)

                    # Add the service to the list of services being scaled
                    if name not in self.profiles:
                        self.log.info(f'Adding {service.name} to scaling')
                        self.add_service(ServiceProfile(
                            name=name,
                            min_instances=default_settings.min_instances,
                            growth=default_settings.growth,
                            shrink=default_settings.shrink,
                            config_hash=config_hash,
                            backlog=default_settings.backlog,
                            max_instances=service.licence_count,
                            container_config=docker_config,
                            queue=get_service_queue(name, self.redis),
                            shutdown_seconds=service.timeout + 30,  # Give service an extra 30 seconds to upload results
                        ))

                    # Update RAM, CPU, licence requirements for running services
                    else:
                        profile = self.profiles[name]

                        if profile.container_config != docker_config or profile.config_hash != config_hash:
                            self.log.info(f"Updating deployment information for {name}")
                            profile.container_config = docker_config
                            profile.config_hash = config_hash
                            self.controller.restart(profile)
                            self.log.info(f"Deployment information for {name} replaced")

                        # A licence count of zero means "no licence limit".
                        if service.licence_count == 0:
                            profile._max_instances = float('inf')
                        else:
                            profile._max_instances = service.licence_count
            except Exception:
                self.log.exception(f"Error applying service settings from: {service.name}")
                self.handle_service_error(service.name)

        # Find any services we have running, that are no longer in the database and remove them
        for stray_service in current_services - set(discovered_services):
            stage = self.get_service_stage(stray_service)
            self.stop_service(stray_service, stage)
def stop_service(self, name, current_stage):
if current_stage != ServiceStage.Off:
# Disable this service's dependencies
self.controller.stop_containers(labels={
'dependency_for': name
})
# Mark this service as not running in the shared record
self._service_stage_hash.set(name, ServiceStage.Off)
# Stop any running disabled services
if name in self.profiles or self.controller.get_target(name) > 0:
self.log.info(f'Removing {name} from scaling')
self.controller.set_target(name, 0)
self.profiles.pop(name, None)
    def update_scaling(self):
        """Check if we need to scale any services up or down."""
        # Re-register with the scheduler to run again in SCALE_INTERVAL seconds.
        self.scheduler.enter(SCALE_INTERVAL, 0, self.update_scaling)
        try:
            # Figure out what services are expected to be running and how many
            profiles: List[ServiceProfile] = list(self.profiles.values())
            targets = {_p.name: self.controller.get_target(_p.name) for _p in profiles}

            for name, profile in self.profiles.items():
                self.log.debug(f'{name}')
                self.log.debug(f'Instances \t{profile.min_instances} < {profile.desired_instances} | '
                               f'{targets[name]} < {profile.max_instances}')
                self.log.debug(
                    f'Pressure \t{profile.shrink_threshold} < {profile.pressure} < {profile.growth_threshold}')

            #
            #   1.  Any processes that want to release resources can always be approved first
            #
            for name, profile in self.profiles.items():
                if targets[name] > profile.desired_instances:
                    self.log.info(f"{name} wants less resources changing allocation "
                                  f"{targets[name]} -> {profile.desired_instances}")
                    self.controller.set_target(name, profile.desired_instances)
                    targets[name] = profile.desired_instances

            if not self.running:
                return

            #
            #   2.  Any processes that aren't reaching their min_instances target must be given
            #       more resources before anyone else is considered.
            #
            for name, profile in self.profiles.items():
                if targets[name] < profile.min_instances:
                    self.log.info(f"{name} isn't meeting minimum allocation "
                                  f"{targets[name]} -> {profile.min_instances}")
                    self.controller.set_target(name, profile.min_instances)
                    targets[name] = profile.min_instances

            #
            #   3.  Try to estimate available resources, and based on some metric grant the
            #       resources to each service that wants them. While this free memory
            #       pool might be spread across many nodes, we are going to treat it like
            #       it is one big one, and let the orchestration layer sort out the details.
            #
            free_cpu = self.controller.free_cpu()
            free_memory = self.controller.free_memory()

            #
            def trim(prof: List[ServiceProfile]):
                # Keep only profiles that still want more instances AND fit
                # in the remaining free resource pool.
                prof = [_p for _p in prof if _p.desired_instances > targets[_p.name]]
                drop = [_p for _p in prof if _p.cpu > free_cpu or _p.ram > free_memory]
                if drop:
                    drop = {_p.name: (_p.cpu, _p.ram) for _p in drop}
                    self.log.debug(f"Can't make more because not enough resources {drop}")
                prof = [_p for _p in prof if _p.cpu <= free_cpu and _p.ram <= free_memory]
                return prof

            profiles = trim(profiles)

            while profiles:
                # TODO do we need to add balancing metrics other than 'least running' for this? probably
                if True:
                    profiles.sort(key=lambda _p: self.controller.get_target(_p.name))

                # Add one for the profile at the bottom
                free_memory -= profiles[0].container_config.ram_mb
                free_cpu -= profiles[0].container_config.cpu_cores
                targets[profiles[0].name] += 1

                # profiles = [_p for _p in profiles if _p.desired_instances > targets[_p.name]]
                # profiles = [_p for _p in profiles if _p.cpu < free_cpu and _p.ram < free_memory]
                profiles = trim(profiles)

            # Apply those adjustments we have made back to the controller
            for name, value in targets.items():
                old = self.controller.get_target(name)
                if value != old:
                    self.log.info(f"Scaling service {name}: {old} -> {value}")
                    self.controller.set_target(name, value)
                if not self.running:
                    return

        except ServiceControlError as error:
            self.log.exception("Error while scaling services.")
            self.handle_service_error(error.service_name)
def handle_service_error(self, service_name):
"""Handle an error occurring in the *analysis* service.
Errors for core systems should simply be logged, and a best effort to continue made.
For analysis services, ignore the error a few times, then disable the service.
"""
self.error_count[service_name] = self.error_count.get(service_name, 0) + 1
if self.error_count[service_name] >= MAXIMUM_SERVICE_ERRORS:
self.datastore.service_delta.update(service_name, [
(self.datastore.service_delta.UPDATE_SET, 'enabled', False)
])
del self.error_count[service_name]
    def sync_metrics(self):
        """Check if there are any pubsub messages we need.

        Folds per-host heartbeat entries and per-profile metric updates into
        the in-memory scaling state, then re-schedules itself.
        """
        # Re-arm this periodic task (priority 3) before doing any work.
        self.scheduler.enter(METRIC_SYNC_INTERVAL, 3, self.sync_metrics)
        # Pull service metrics from redis
        # NOTE(review): status_table.items() is itself iterated via .items() again
        # below, so it appears to return a mapping - confirm against its class.
        service_data = self.status_table.items()
        for host, (service, state, time_limit) in service_data.items():
            # If an entry hasn't expired, take it into account
            if time.time() < time_limit:
                self.state.update(service=service, host=host, throughput=0,
                                  busy_seconds=METRIC_SYNC_INTERVAL if state == ServiceStatus.Running else 0)
            # If an entry expired a while ago (10 min grace), the host is probably
            # not in use any more - drop it from the table.
            if time.time() > time_limit + 600:
                self.status_table.pop(host)
        # Check the set of services that might be sitting at zero instances, and if it is, we need to
        # manually check if it is offline
        export_interval = self.config.core.metrics.export_interval
        for profile_name, profile in self.profiles.items():
            # Pull out statistics from the metrics regularization
            update = self.state.read(profile_name)
            if update:
                delta = time.time() - profile.last_update
                profile.update(
                    delta=delta,
                    backlog=profile.queue.length(),
                    **update
                )
            # Check if we expect no messages, if so pull the queue length ourselves since there is no heartbeat
            if self.controller.get_target(profile_name) == 0 and profile.desired_instances == 0 and profile.queue:
                queue_length = profile.queue.length()
                if queue_length > 0:
                    self.log.info(f"Service at zero instances has messages: "
                                  f"{profile.name} ({queue_length} in queue)")
                    # Feed a synthetic update so the scaler notices the backlog
                    # even without any running instance producing heartbeats.
                    profile.update(
                        delta=export_interval,
                        instances=0,
                        backlog=queue_length,
                        duty_cycle=profile.target_duty_cycle
                    )
        # TODO maybe find another way of implementing this that is less aggressive
        # for profile_name, profile in self.profiles.items():
        #     # In the case that there should actually be instances running, but we haven't gotten
        #     # any heartbeat messages we might be waiting for a container that can't start properly
        #     if self.services.controller.get_target(profile_name) > 0:
        #         if time.time() - profile.last_update > profile.shutdown_seconds:
        #             self.log.error(f"Starting service {profile_name} has timed out "
        #                            f"({time.time() - profile.last_update} > {profile.shutdown_seconds} seconds)")
        #
        #             # Disable the the service
        #             self.datastore.service_delta.update(profile_name, [
        #                 (self.datastore.service_delta.UPDATE_SET, 'enabled', False)
        #             ])
def expire_errors(self):
self.scheduler.enter(ERROR_EXPIRY_INTERVAL, 0, self.expire_errors)
self.error_count = {name: err - 1 for name, err in self.error_count.items() if err > 1}
def process_timeouts(self):
self.scheduler.enter(PROCESS_TIMEOUT_INTERVAL, 0, self.process_timeouts)
while True:
message = self.scaler_timeout_queue.pop(blocking=False)
if not message:
break
# noinspection PyBroadException
try:
self.log.info(f"Killing service container: {message['container']} running: {message['service']}")
self.controller.stop_container(message['service'], message['container'])
except Exception:
self.log.exception(f"Exception trying to stop timed out service container: {message}")
    def export_metrics(self):
        """Periodically export per-service scaling metrics plus cluster resource totals."""
        self.scheduler.enter(self.config.logging.export_interval, 0, self.export_metrics)
        # One status record per service profile.
        for service_name, profile in self.profiles.items():
            metrics = {
                'running': profile.running_instances,
                'target': profile.desired_instances,
                'minimum': profile.min_instances,
                'maximum': profile.instance_limit,
                'dynamic_maximum': profile.max_instances,
                'queue': profile.queue_length,
                'duty_cycle': profile.duty_cycle,
                'pressure': profile.pressure
            }
            export_metrics_once(service_name, Status, metrics, host=HOSTNAME, counter_type='scaler-status',
                                config=self.config, redis=self.redis)
        # One cluster-wide record with free/total memory and CPU as reported
        # by the orchestration controller.
        memory, memory_total = self.controller.memory_info()
        cpu, cpu_total = self.controller.cpu_info()
        metrics = {
            'memory_total': memory_total,
            'cpu_total': cpu_total,
            'memory_free': memory,
            'cpu_free': cpu
        }
        export_metrics_once('scaler', Metrics, metrics, host=HOSTNAME,
                            counter_type='scaler', config=self.config, redis=self.redis)
def flush_service_status(self):
"""The service status table may have references to containers that have crashed. Try to remove them all."""
self.scheduler.enter(SERVICE_STATUS_FLUSH, 0, self.flush_service_status)
# Pull all container names
names = set(self.controller.get_running_container_names())
# Get the names we have status for
for hostname in self.status_table.keys():
if hostname not in names:
self.status_table.pop(hostname)
def log_container_events(self):
"""The service status table may have references to containers that have crashed. Try to remove them all."""
self.scheduler.enter(CONTAINER_EVENTS_LOG_INTERVAL, 0, self.log_container_events)
for message in self.controller.new_events():
self.log.warning("Container Event :: " + message)
| StarcoderdataPython |
11365914 | <reponame>SkyTruth/pelagos-data
#!/usr/bin/env python
# This document is part of Pelagos Data
# https://github.com/skytruth/pelagos-data
# =========================================================================== #
#
# The MIT License (MIT)
#
# Copyright (c) 2014 SkyTruth
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# =========================================================================== #
"""
Concatenate files
"""
from __future__ import unicode_literals
from glob import glob
from os.path import abspath, expanduser, isfile, dirname
from . import components
from .components import *
from ..controller import *
from ..common import *
from .. import raw
#/* ======================================================================= */#
#/* Document level information
#/* ======================================================================= */#
__all__ = ['print_usage', 'print_help', 'print_long_usage', 'main']
UTIL_NAME = 'catfiles.py'
#/* ======================================================================= */#
#/* Define print_usage() function
#/* ======================================================================= */#
def print_usage():
    """Print brief commandline usage information.

    Returns:
        1 for exit code purposes
    """
    # TODO: Populate usage
    pad = " " * len(UTIL_NAME)
    vprint("""
{0} [--help-info] [-q] [-sl n] [-s schema] [-se]
{1} [-m write_mode] ofile ifile [ifile ...]
""".format(UTIL_NAME, pad))
    return 1
#/* ======================================================================= */#
#/* Define print_long_usage() function
#/* ======================================================================= */#
def print_long_usage():
    """
    Print full commandline usage information: the short usage banner followed
    by a description of every option.

    Returns:
        1 for exit code purposes
    """
    print_usage()
    # The {0} placeholder is filled with the default raw schema so the help
    # text always matches the actual default.
    vprint("""Options:
    -q -quiet           Suppress all output
    -m -mode            Write mode for updating file: 'w' for overwrite, or 'a'
                        for append
                        [default: w]
    -s -schema          Output file header as: field1,field2,...
                        To skip writing a schema, use an empty string
                        [default: {0}]
    -sl -skip-lines     Skip n lines of each input file
                        [default: 0]
    -se -skip-empty     Skip concatenating completely empty lines
    ofile               Target file
    ifile               Input file to be concatenated.  See --help for information
                        about getting around the command line's argument limit when
                        attempting to process a large number of files.  Using '-'
                        as an input file causes
""".format(','.join(raw.RAW_SCHEMA)))
    return 1
#/* ======================================================================= */#
#/* Define print_help() function
#/* ======================================================================= */#
def print_help():
    """Print the detailed help text for this utility.

    Returns:
        1 for exit code purposes
    """
    # TODO: Populate help
    underline = '-' * len(UTIL_NAME)
    vprint("""
Help: {0}
------{1}
{2}
""".format(UTIL_NAME, underline, main.__doc__))
    return 1
#/* ======================================================================= */#
#/* Define main() function
#/* ======================================================================= */#
def main(args):
    """
    Concatenate multiple files into a single target file. Target file can be
    overwritten or updated according to the write mode supplied via -mode, which
    directly sets the mode in Python's open() function.
    If a command line error stating the argument list is too long is encountered
    switch to using quoted wildcard paths, which lets Python handle the argument
    expansion.

    Args:
        args: commandline arguments (sys.argv[1:]) - flags, one output file,
            then one or more input files / glob patterns / '-' for stdin.

    Returns:
        0 on success, 1 on any usage, validation, or processing error.
    """
    #/* ----------------------------------------------------------------------- */#
    #/*     Print usage
    #/* ----------------------------------------------------------------------- */#
    # BUG FIX: was "len(args) is 0" - identity comparison against an int literal
    # is implementation-dependent (SyntaxWarning on CPython >= 3.8); use truthiness.
    if not args:
        return print_usage()
    #/* ----------------------------------------------------------------------- */#
    #/*     Defaults
    #/* ----------------------------------------------------------------------- */#
    output_schema = raw.RAW_SCHEMA
    write_mode = 'w'
    skip_lines = 0
    skip_empty_lines = False
    #/* ----------------------------------------------------------------------- */#
    #/*     Containers
    #/* ----------------------------------------------------------------------- */#
    input_files = []
    output_file = None
    #/* ----------------------------------------------------------------------- */#
    #/*     Parse arguments
    #/* ----------------------------------------------------------------------- */#
    i = 0
    arg = None
    arg_error = False
    while i < len(args):
        try:
            arg = args[i]
            # Help arguments
            # NOTE(review): '-help-info' appears twice; one entry was probably
            # meant to be '-helpinfo' - confirm before changing the tuple.
            if arg in ('--help-info', '-help-info', '--helpinfo', '-help-info', '-h', '--h'):
                return print_help_info()
            elif arg in ('--help', '-help'):
                return print_help()
            elif arg in ('--usage', '-usage'):
                return print_usage()
            elif arg in ('--long-usage', '-long-usage'):
                return print_long_usage()
            elif arg in ('--version', '-version'):
                return print_version()
            elif arg in ('--short-version', '-short-version'):
                return print_short_version()
            elif arg in ('--license', '-license'):
                return print_license()
            # User feedback
            elif arg in ('-q', '-quiet'):
                i += 1
                components.VERBOSE_MODE = False
            # Define the output schema
            elif arg in ('-s', '-schema', '-header'):
                i += 2
                output_schema = args[i - 1]
            # Skip lines in input files
            elif arg in ('-sl', '-skip-lines'):
                i += 2
                skip_lines = string2type(args[i - 1])
            elif arg in ('-se', '-skip-empty'):
                i += 1
                skip_empty_lines = True
            # Additional options
            elif arg in ('-m', '-mode'):
                i += 2
                write_mode = args[i - 1]
            # Catch invalid arguments
            elif arg[0] == '-' and arg != '-':
                i += 1
                arg_error = True
                vprint("ERROR: Unrecognized argument: %s" % arg)
            # If reading from empty stdin, throw an error
            elif arg == '-' and sys.stdin.isatty():
                i += 1
                arg_error = True
                vprint("ERROR: Trying to read from empty stdin")
            # Positional arguments and errors
            else:
                i += 1
                # Catch output file
                if output_file is None:
                    output_file = abspath(expanduser(arg))
                else:
                    expanded_arg = abspath(expanduser(arg))
                    # Read from stdin
                    if arg == '-' and not sys.stdin.isatty():
                        f_list = sys.stdin
                    # Let python handle glob expansion
                    elif '*' in arg:
                        f_list = glob(expanded_arg)
                    # Argument is just a file path - wrap in a list to make iterable
                    else:
                        f_list = [expanded_arg]
                    # Loop through all records, normalize the path, and append to input files
                    for record in f_list:
                        r = abspath(expanduser(record))
                        if r not in input_files:
                            input_files.append(r)
        # This catches several conditions:
        #   1. The last argument is a flag that requires parameters but the user did not supply the parameter
        #   2. The arg parser did not properly consume all parameters for an argument
        #   3. The arg parser did not properly iterate the 'i' variable
        #   4. An argument split on '=' doesn't have anything after '=' - e.g. '--output-file='
        except (IndexError, ValueError):
            i += 1
            arg_error = True
            vprint("ERROR: An argument has invalid parameters: %s" % arg)
    #/* ----------------------------------------------------------------------- */#
    #/*     Validate parameters
    #/* ----------------------------------------------------------------------- */#
    # To be safe, force list of input files to be unique
    # TODO: Determine if this is an unecessary waste of time
    input_files = list(set(input_files))
    bail = False
    # Check arguments
    if arg_error:
        bail = True
        vprint("ERROR: Did not successfully parse arguments")
    # Check output file
    if output_file is None:
        bail = True
        vprint("ERROR: Need an output file")
    elif isfile(output_file) and not os.access(output_file, os.W_OK):
        bail = True
        vprint("ERROR: Need write access: %s" % output_file)
    elif not isfile(output_file) and not os.access(dirname(output_file), os.W_OK):
        bail = True
        vprint("ERROR: Need write access: %s" % dirname(output_file))
    # Check input files
    # BUG FIX: was "len(input_files) is 0" - same int-identity problem as above.
    if not input_files:
        bail = True
        vprint("ERROR: Need at least one input file")
    for ifile in input_files:
        if not os.access(ifile, os.R_OK):
            bail = True
            vprint("ERROR: Can't access input file: %s" % ifile)
    # Exit if something did not pass validation
    if bail:
        return 1
    #/* ----------------------------------------------------------------------- */#
    #/*     Process files
    #/* ----------------------------------------------------------------------- */#
    # To prevent confusing the user, make default schema formatted the same as user input schema
    # The cat_files() function can handle either input
    if isinstance(output_schema, (list, tuple)):
        output_schema = ','.join(output_schema)
    vprint("Output file: %s" % output_file)
    vprint("Write mode: %s" % write_mode)
    vprint("Schema: %s" % output_schema)
    vprint("Concatenating %s files ..." % len(input_files))
    try:
        if not raw.cat_files(input_files, output_file, schema=output_schema, write_mode=write_mode,
                             skip_lines=skip_lines, skip_empty=skip_empty_lines):
            vprint("ERROR: Did not successfully concatenate files")
            return 1
    except Exception as e:
        # NOTE(review): unicode() is Python 2 only - under Python 3 this handler
        # itself raises NameError. Confirm target interpreter before changing.
        vprint(unicode(e))
        return 1
    vprint("Done")
    return 0
#/* ======================================================================= */#
#/* Command Line Execution
#/* ======================================================================= */#
if __name__ == '__main__':
    # Didn't get enough arguments - print usage and exit.
    # BUG FIX: was "len(sys.argv) is 1" - identity comparison against an int
    # literal is implementation-dependent; use equality.
    if len(sys.argv) == 1:
        sys.exit(print_usage())
    # Got enough arguments - give sys.argv[1:] to main()
    else:
        sys.exit(main(sys.argv[1:]))
| StarcoderdataPython |
3380354 | from typing import Any, Dict, Mapping, TypedDict
from kolga.libs.service import Service
from kolga.settings import settings
from kolga.utils.general import (
DATABASE_DEFAULT_PORT_MAPPING,
POSTGRES,
get_deploy_name,
get_project_secret_var,
)
from kolga.utils.models import DockerImageRef, HelmValues
from kolga.utils.url import URL # type: ignore
class _Image(TypedDict, total=False):
    """Docker image reference split into parts; every key is optional (total=False)."""
    # e.g. "docker.io"
    registry: str
    # e.g. "bitnami/postgresql"
    repository: str
    # e.g. "11.7.0"
    tag: str
class _RequestLimits(TypedDict, total=False):
    """Kubernetes-style resource request quantities (e.g. "50Mi", "50m")."""
    memory: str
    cpu: str
class _Limits(TypedDict, total=False):
    """Container resources section of the Helm values; only "requests" is modelled here."""
    requests: _RequestLimits
class _Values(HelmValues):
    """Helm values passed to the bitnami/postgresql chart."""
    # Overrides the generated release name so the service DNS name is predictable.
    fullnameOverride: str
    image: _Image
    postgresqlUsername: str
    postgresqlPassword: str
    postgresqlDatabase: str
    resources: _Limits
class PostgresqlService(Service):
    """
    Helm-deployed PostgreSQL database service (bitnami/postgresql chart).

    TODO: Add support for multiple databases within one server
    """
    def __init__(
        self,
        chart: str = "bitnami/postgresql",
        chart_version: str = "7.7.2",
        username: str = settings.DATABASE_USER,
        # BUG FIX: this default and the two "<PASSWORD>" placeholders below were
        # redaction artifacts (invalid syntax); restored from settings / self.
        password: str = settings.DATABASE_PASSWORD,
        database: str = settings.DATABASE_DB,
        artifact_name: str = "DATABASE_URL",
        **kwargs: Any,
    ) -> None:
        kwargs["name"] = POSTGRES
        kwargs["chart"] = chart
        kwargs["chart_version"] = chart_version
        kwargs["artifact_name"] = artifact_name
        super().__init__(**kwargs)
        self.username = username
        self.password = password
        self.database = database
        image = DockerImageRef.parse_string(settings.POSTGRES_IMAGE)
        # Resource requests are overridable per-service; defaults are deliberately small.
        self.memory_request = self.service_specific_values.get("MEMORY_REQUEST", "50Mi")
        self.cpu_request = self.service_specific_values.get("CPU_REQUEST", "50m")
        self.values: _Values = {
            "image": {"repository": image.repository},
            "fullnameOverride": get_deploy_name(track=self.track, postfix=self.name),
            "postgresqlUsername": self.username,
            "postgresqlPassword": self.password,
            "postgresqlDatabase": self.database,
            "resources": {
                "requests": {"memory": self.memory_request, "cpu": self.cpu_request},
            },
        }
        # Only set registry/tag when the parsed image reference actually has them,
        # so the chart's own defaults apply otherwise.
        if image.registry is not None:
            self.values["image"]["registry"] = image.registry
        if image.tag is not None:
            self.values["image"]["tag"] = image.tag

    def get_database_url(self) -> URL:
        """Build the canonical postgres URL for this track's deployment."""
        deploy_name = get_deploy_name(self.track)
        port = DATABASE_DEFAULT_PORT_MAPPING[POSTGRES]
        host = f"{deploy_name}-{POSTGRES}"
        return URL(
            drivername=POSTGRES,
            host=host,
            port=port,
            username=self.username,
            password=self.password,
            database=self.database,
        )

    def _get_default_database_values(
        self, url: URL, service_name: str = ""
    ) -> Dict[str, str]:
        """
        Return a set of default extra values that are non-user definable

        Currently there is only support for the user to set a single value when
        adding a service. This adds some default values in order for the application
        to be able to get every part of the database URL separately.

        Args:
            url: The URL of the database as a single string
            service_name: Prefixes for each value

        Returns:
            Mapping of per-service secret variable names to URL components.
        """
        return {
            get_project_secret_var(
                project_name=service_name, value="DATABASE_URL"
            ): str(url),
            get_project_secret_var(
                project_name=service_name, value="DATABASE_HOST"
            ): str(url.host),
            get_project_secret_var(project_name=service_name, value="DATABASE_DB"): str(
                url.database
            ),
            get_project_secret_var(
                project_name=service_name, value="DATABASE_PORT"
            ): str(url.port),
            get_project_secret_var(
                project_name=service_name, value="DATABASE_USERNAME"
            ): str(url.username),
            get_project_secret_var(
                project_name=service_name, value="DATABASE_PASSWORD"
            ): str(url.password),
        }

    def get_artifacts(self) -> Mapping[str, str]:
        """Expose the database URL (whole and component-wise) to every dependent service."""
        artifacts = {}
        for service in self._prerequisite_of:
            main_artifact_name = self.get_service_secret_artifact_name(service=service)
            artifacts[main_artifact_name] = str(self.get_database_url())
            artifacts.update(
                self._get_default_database_values(
                    url=self.get_database_url(), service_name=service.name
                )
            )
        return artifacts
| StarcoderdataPython |
9758756 | from os import listdir
from os.path import join
from itertools import product
import pandas as pd
class TestRun:
    """End-to-end checks for the indicator run: expected output files exist and are well-formed."""

    def test_output_files_exist(self, run_as_module):
        """Every (date, geo, metric, smoother) combination should yield a CSV in receiving/."""
        csv_files = listdir("receiving")
        dates = [
            "20200801", "20200802", "20200803", "20200804",
            "20200805", "20200806", "20200807", "20200808",
            "20200809", "20200810", "20200811",
        ]
        geos = ["county", "state", "hhs", "nation"]
        metrics = ["anosmia", "ageusia", "sum_anosmia_ageusia"]
        # Renamed so the loop variable no longer shadows the list it iterates.
        smoothers = ["raw", "smoothed"]
        expected_files = []
        for date, geo, metric, smoother in product(dates, geos, metrics, smoothers):
            nf = "_".join([date, geo, metric, smoother, "research"]) + ".csv"
            expected_files.append(nf)
        # BUG FIX: this comparison was a bare expression, so the test could
        # never fail; it now actually asserts.
        assert set(csv_files) == set(expected_files)

    def test_output_file_format(self, run_as_module):
        """Output CSVs should carry the standard four-column schema."""
        # NOTE(review): this filename ends in "_search" while the existence test
        # builds "_research" names - confirm which suffix the pipeline emits.
        df = pd.read_csv(
            join("receiving", "20200810_state_anosmia_smoothed_search.csv")
        )
        assert (df.columns.values == [
            "geo_id", "val", "se", "sample_size"]).all()
3307129 | import random
import numpy as np
# Modular-arithmetic scratch work (toy RSA-style numbers), printed for inspection.
print(pow(517, 13, 1261))
print(413 + 180)
print(512908935546875 - 406747768078 * 1261)
print(type(1.5) == float)
test_list = [10 for _ in range(10)]
print(test_list)
6594440 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import yaml
def config_loader(config_file):
    """Parse a YAML configuration from an open file object.

    Args:
        config_file: an open file(-like) object containing YAML text.

    Returns:
        The parsed configuration (typically a dict).
    """
    with config_file as f:
        # BUG FIX: bare yaml.load(f) without a Loader is unsafe on untrusted
        # input (arbitrary object construction) and raises TypeError on
        # PyYAML >= 6. safe_load restricts parsing to plain YAML types.
        # NOTE(review): if configs rely on custom YAML tags, supply a Loader instead.
        return yaml.safe_load(f)
| StarcoderdataPython |
368727 | <filename>yawn/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-22 20:00
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.functions
import yawn.utilities.cron
class Migration(migrations.Migration):
    """Initial schema for the yawn app: Execution, Message, Queue, Run, Task,
    Template, Worker, Workflow and WorkflowName tables plus their foreign keys
    and uniqueness constraints.

    Auto-generated by Django 1.10.4 - applied migrations should not be hand-edited.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Tables are created first, then cross-table FKs and unique_together
        # constraints are added at the end (Django orders these automatically).
        migrations.CreateModel(
            name='Execution',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.TextField(
                    choices=[('running', 'running'), ('succeeded', 'succeeded'), ('failed', 'failed'),
                             ('killed', 'killed'), ('lost', 'lost')], default='running')),
                ('start_timestamp', models.DateTimeField(default=django.db.models.functions.Now)),
                ('stop_timestamp', models.DateTimeField(null=True)),
                ('exit_code', models.IntegerField(null=True)),
                ('stdout', models.TextField(blank=True, default='')),
                ('stderr', models.TextField(blank=True, default='')),
            ],
        ),
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
            ],
        ),
        migrations.CreateModel(
            name='Queue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.SlugField(allow_unicode=True, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Run',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('submitted_time', models.DateTimeField()),
                ('scheduled_time', models.DateTimeField(null=True)),
                ('status',
                 models.TextField(choices=[('running', 'running'), ('succeeded', 'succeeded'), ('failed', 'failed')],
                                  default='running')),
                ('parameters', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
            ],
        ),
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.TextField(
                    choices=[('waiting', 'waiting'), ('queued', 'queued'), ('running', 'running'),
                             ('succeeded', 'succeeded'), ('failed', 'failed'), ('upstream_failed', 'upstream_failed')],
                    default='waiting')),
                ('run', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='yawn.Run')),
            ],
        ),
        migrations.CreateModel(
            name='Template',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.SlugField(allow_unicode=True, db_index=False)),
                ('command', models.TextField()),
                ('max_retries', models.IntegerField(default=0)),
                ('timeout', models.IntegerField(null=True)),
                ('queue', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='yawn.Queue')),
                ('upstream', models.ManyToManyField(blank=True, related_name='downstream', to='yawn.Template')),
            ],
        ),
        migrations.CreateModel(
            name='Worker',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField()),
                ('status', models.TextField(choices=[('active', 'active'), ('exited', 'exited'), ('lost', 'lost')],
                                            default='active')),
                ('start_timestamp', models.DateTimeField(default=django.db.models.functions.Now)),
                ('last_heartbeat', models.DateTimeField(default=django.db.models.functions.Now)),
            ],
        ),
        migrations.CreateModel(
            name='Workflow',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('version', models.IntegerField(editable=False)),
                ('schedule_active', models.BooleanField(default=False)),
                ('schedule', models.TextField(null=True, validators=[yawn.utilities.cron.cron_validator])),
                ('next_run', models.DateTimeField(null=True)),
                ('parameters', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
            ],
        ),
        migrations.CreateModel(
            name='WorkflowName',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.SlugField(allow_unicode=True, unique=True)),
                ('current_version',
                 models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='is_current',
                                      to='yawn.Workflow')),
            ],
        ),
        migrations.AddField(
            model_name='workflow',
            name='name',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='yawn.WorkflowName'),
        ),
        migrations.AddField(
            model_name='template',
            name='workflow',
            field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.PROTECT, to='yawn.Workflow'),
        ),
        migrations.AddField(
            model_name='task',
            name='template',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='yawn.Template'),
        ),
        migrations.AddField(
            model_name='run',
            name='workflow',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='yawn.Workflow'),
        ),
        migrations.AddField(
            model_name='message',
            name='queue',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='yawn.Queue'),
        ),
        migrations.AddField(
            model_name='message',
            name='task',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='yawn.Task'),
        ),
        migrations.AddField(
            model_name='execution',
            name='task',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='yawn.Task'),
        ),
        migrations.AddField(
            model_name='execution',
            name='worker',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='yawn.Worker'),
        ),
        migrations.AlterUniqueTogether(
            name='workflow',
            unique_together=set([('name', 'version')]),
        ),
        migrations.AlterUniqueTogether(
            name='template',
            unique_together=set([('workflow', 'name')]),
        ),
    ]
| StarcoderdataPython |
3519048 | <filename>pinterest_scraper/scraper.py
from selenium import webdriver
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.common.keys import Keys
import time,random,socket,unicodedata
import string, copy, os
import pandas as pd
import requests
try:
from urlparse import urlparse
except ImportError:
from six.moves.urllib.parse import urlparse
def download(myinput, mydir="./"):
    """Download one URL (or each URL in a list) into *mydir*.

    Args:
        myinput: a URL string/bytes, or a list of URLs.
        mydir: destination directory (default: current directory).
    """
    if isinstance(myinput, (str, bytes)):
        # http://automatetheboringstuff.com/chapter11/
        res = requests.get(myinput)
        res.raise_for_status()
        # Name the local file after the last path component of the URL.
        # https://stackoverflow.com/questions/18727347/how-to-extract-a-filename-from-a-url-append-a-word-to-it
        outfile = os.path.join(mydir, os.path.basename(urlparse(myinput).path))
        # BUG FIX: use a context manager so the handle is closed even if a
        # write fails (was a bare open()/close() pair).
        with open(outfile, 'wb') as out:
            for chunk in res.iter_content(100000):
                out.write(chunk)
    elif isinstance(myinput, list):
        for url in myinput:
            download(url, mydir)
    else:
        # Unsupported input type: keep the original best-effort behaviour and ignore.
        pass
def phantom_noimages():
    """Build a PhantomJS driver with image loading disabled and a random User-Agent."""
    from fake_useragent import UserAgent
    from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
    ua = UserAgent()
    #ua.update()
    # Spoof a random User-Agent so requests look less like automation.
    # https://stackoverflow.com/questions/29916054/change-user-agent-for-selenium-driver
    caps = DesiredCapabilities.PHANTOMJS
    caps["phantomjs.page.settings.userAgent"] = ua.random
    # NOTE(review): PhantomJS support was removed from recent Selenium releases -
    # confirm the pinned selenium version still provides webdriver.PhantomJS.
    return webdriver.PhantomJS(service_args=["--load-images=no"], desired_capabilities=caps)
def randdelay(a, b):
    """Sleep for a duration drawn uniformly at random from [a, b] seconds."""
    delay = random.uniform(a, b)
    time.sleep(delay)
def u_to_s(uni):
    """Best-effort conversion of a unicode string to ASCII bytes (non-ASCII chars dropped)."""
    normalized = unicodedata.normalize('NFKD', uni)
    return normalized.encode('ascii', 'ignore')
class Pinterest_Helper(object):
    """Log into Pinterest with Selenium and scrape full-size image URLs from boards/searches."""

    def __init__(self, login, pw, browser=None):
        """Start (or reuse) a browser and log into pinterest.com with the given credentials."""
        if browser is None:
            # Disable image loading for speed; we only need the <img> src attributes.
            # http://tarunlalwani.com/post/selenium-disable-image-loading-different-browsers/
            profile = webdriver.FirefoxProfile()
            profile.set_preference("permissions.default.image", 2)
            self.browser = webdriver.Firefox(firefox_profile=profile)
        else:
            self.browser = browser
        self.browser.get("https://www.pinterest.com")
        emailElem = self.browser.find_element_by_name('id')
        emailElem.send_keys(login)
        passwordElem = self.browser.find_element_by_name('password')
        passwordElem.send_keys(pw)
        passwordElem.send_keys(Keys.RETURN)
        randdelay(2, 4)

    def getURLs(self, urlcsv, threshold=500):
        """Scrape every board URL listed in *urlcsv*; return a shuffled, de-duplicated list."""
        tmp = self.read(urlcsv)
        results = []
        for t in tmp:
            tmp3 = self.runme(t, threshold)
            results = list(set(results + tmp3))
        random.shuffle(results)
        return results

    def write(self, myfile, mylist):
        """Persist a list of URLs to *myfile* as a single-column CSV."""
        tmp = pd.DataFrame(mylist)
        tmp.to_csv(myfile, index=False, header=False)

    def read(self, myfile):
        """Read a single-column CSV of URLs back into a flat list."""
        tmp = pd.read_csv(myfile, header=None).values.tolist()
        tmp2 = []
        for i in range(0, len(tmp)):
            tmp2.append(tmp[i][0])
        return tmp2

    def runme(self, url, threshold=500, persistence=120, debug=False):
        """Scroll through *url* collecting 736x image URLs, up to *threshold* scroll steps.

        Stops early once the visible image set has not changed for *persistence*
        consecutive iterations.
        """
        final_results = []
        previmages = []
        tries = 0
        try:
            self.browser.get(url)
            while threshold > 0:
                try:
                    results = []
                    images = self.browser.find_elements_by_tag_name("img")
                    if images == previmages:
                        tries += 1
                    else:
                        tries = 0
                    if tries > persistence:
                        if debug == True:
                            print("Exitting: persistence exceeded")
                        return final_results
                    for i in images:
                        src = i.get_attribute("src")
                        if src:
                            # Upgrade thumbnail URLs to the 736px variant.
                            if src.find("/236x/") != -1:
                                src = src.replace("/236x/", "/736x/")
                            results.append(u_to_s(src))
                    previmages = copy.copy(images)
                    final_results = list(set(final_results + results))
                    dummy = self.browser.find_element_by_tag_name('a')
                    dummy.send_keys(Keys.PAGE_DOWN)
                    randdelay(1, 2)
                    threshold -= 1
                except (StaleElementReferenceException):
                    if debug == True:
                        print("StaleElementReferenceException")
                    threshold -= 1
                except (socket.error, socket.timeout):
                    if debug == True:
                        print("Socket Error")
        except KeyboardInterrupt:
            return final_results
        if debug == True:
            print("Exitting at end")
        return final_results

    def runme_alt(self, url, threshold=500, tol=10, minwait=1, maxwait=2, debug=False):
        """Alternative scraper: scroll to the page bottom and stop when the page
        height stops growing for *tol* consecutive checks."""
        final_results = []
        heights = []
        dwait = 0
        try:
            self.browser.get(url)
            while threshold > 0:
                try:
                    results = []
                    images = self.browser.find_elements_by_tag_name("img")
                    cur_height = self.browser.execute_script("return document.documentElement.scrollTop")
                    page_height = self.browser.execute_script("return document.body.scrollHeight")
                    heights.append(int(page_height))
                    if debug == True:
                        print("Current Height: " + str(cur_height))
                        print("Page Height: " + str(page_height))
                    if len(heights) > tol:
                        if heights[-tol:] == [heights[-1]] * tol:
                            if debug == True:
                                print("No more elements")
                            return final_results
                        else:
                            if debug == True:
                                print("Min element: {}".format(str(min(heights[-tol:]))))
                                print("Max element: {}".format(str(max(heights[-tol:]))))
                    for i in images:
                        src = i.get_attribute("src")
                        if src:
                            if src.find("/236x/") != -1:
                                src = src.replace("/236x/", "/736x/")
                            results.append(u_to_s(src))
                    final_results = list(set(final_results + results))
                    self.browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                    randdelay(minwait, maxwait)
                    threshold -= 1
                except (StaleElementReferenceException):
                    if debug == True:
                        print("StaleElementReferenceException")
                    threshold -= 1
                except (socket.error, socket.timeout):
                    # Back off a little longer after each consecutive socket error.
                    if debug == True:
                        print("Socket Error. Waiting {} seconds.".format(str(dwait)))
                    time.sleep(dwait)
                    dwait += 1
        except KeyboardInterrupt:
            return final_results
        if debug == True:
            print("Exitting at end")
        return final_results

    def scrape_old(self, url):
        """Single-page scrape without scrolling (legacy)."""
        results = []
        self.browser.get(url)
        images = self.browser.find_elements_by_tag_name("img")
        for i in images:
            src = i.get_attribute("src")
            if src:
                # BUG FIX: was string.find(src, ...) / string.replace(src, ...) -
                # those module-level helpers were removed in Python 3 and raised
                # AttributeError; use str methods like runme() already does.
                if src.find("/236x/") != -1:
                    src = src.replace("/236x/", "/736x/")
                results.append(u_to_s(src))
        return results

    def close(self):
        """Close the underlying browser window."""
        self.browser.close()
| StarcoderdataPython |
9766969 | # Python3 Minimum Number of Jumps needed to reach End
# Given an array of integers where each element represents the max number
# of steps that can be made forward from that element. Write a function to
# return the minimum number of jumps to reach the end of the array (starting
# from the first element). If an element is 0, then cannot move through that element.
def MinJumps(arr):
    """Return the minimum number of jumps needed to reach the last index.

    Each element gives the maximum forward step from that position. Returns
    -1 when the input is empty, starts with 0, or the end is unreachable.

    O(n^2) dynamic programming: jumps[i] is the minimum number of jumps to
    reach index i; because jumps[] is non-decreasing, the first reachable j
    already yields the minimum, so the inner loop can break early.
    """
    n = len(arr)
    if n == 0 or arr[0] == 0:
        return -1
    jumps = [0] * n
    for i in range(1, n):
        jumps[i] = float('inf')
        for j in range(i):
            if i <= j + arr[j] and jumps[j] != float('inf'):
                jumps[i] = min(jumps[i], jumps[j] + 1)
                break
    # BUG FIX: previously returned float('inf') for an unreachable end;
    # return -1 to match the documented contract and the arr[0] == 0 case.
    return -1 if jumps[n - 1] == float('inf') else jumps[n - 1]


print(MinJumps([1, 3, 5, 8, 9, 2, 6, 7, 6, 8, 9]))
9643221 | <gh_stars>0
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.validators import RegexValidator, validate_email
class Technician(models.Model):
    """Repair technician contact record."""
    first_name = models.CharField(max_length=200, verbose_name = _('ονομα'))
    surname = models.CharField(max_length=200, verbose_name = _('επιθετο'))
    # Shared phone validator: optional '+', then 9-15 digits.
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Ο αριθμός τηλεφώνου πρέπει να είναι της μορφής: '+999999999'. Επιτρέπονται μέχρι 15 ψηφία.")
    phone = models.CharField(validators=[phone_regex], max_length=17, blank=True, verbose_name = _('σταθερό'))
    phone_other = models.CharField(validators=[phone_regex], max_length=17, blank=True, verbose_name = _('αλλο'))
    email = models.CharField(validators=[validate_email], max_length=200, verbose_name = _('email'))

    class Meta:
        verbose_name = _('Τεχνικος')
        verbose_name_plural = _('Τεχνικοι')

    def __str__(self):
        """Display as "First Last (phone)"."""
        return '{} {} ({})'.format(self.first_name, self.surname, self.phone)
class Customer(models.Model):
    """Customer contact record."""
    first_name = models.CharField(max_length=200, verbose_name = _('ονομα'))
    surname = models.CharField(max_length=200, verbose_name = _('επιθετο'))
    # Shared phone validator: optional '+', then 9-15 digits.
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Ο αριθμός τηλεφώνου πρέπει να είναι της μορφής: '+999999999'. Επιτρέπονται μέχρι 15 ψηφία.")
    phone = models.CharField(validators=[phone_regex], max_length=17, blank=True, verbose_name = _('σταθερό'))
    phone_mobile = models.CharField(validators=[phone_regex], max_length=17, blank=True, verbose_name = _('κινητο'))
    phone_other = models.CharField(validators=[phone_regex], max_length=17, blank=True, verbose_name = _('αλλο'))
    address = models.CharField(max_length=200, verbose_name = _('διευθυνση'))

    class Meta:
        verbose_name = _('Πελατης')
        verbose_name_plural = _('Πελατες')

    def __str__(self):
        """Display as "First Last (phone)"."""
        return '{} {} ({})'.format(self.first_name, self.surname, self.phone)
class Product(models.Model):
    """A customer's device brought in for repair, with damage and repair dates."""
    make = models.CharField(max_length=200, verbose_name=_('μαρκα'))
    model = models.CharField(max_length=200, verbose_name=_('μοντέλο'))
    product_type = models.CharField(max_length=200, verbose_name=_('προϊόν'))
    damage = models.TextField(verbose_name=_('βλαβη'))
    # BUG FIX: this field was labelled _('μαρκα') ("make"), a copy-paste slip
    # from the make field above; label it as notes instead.
    notes = models.TextField(blank=True, verbose_name=_('σημειώσεις'))
    purchase_date = models.DateField(verbose_name=_('ημερομηνία αγοράς'))
    reported_date = models.DateTimeField(verbose_name=_('ημερομηνία αναφοράς'))
    # Null until the repair is completed.
    repaired_date = models.DateTimeField(blank=True, null=True, verbose_name=_('ημερομηνία επισκευής'))
    customer = models.ForeignKey(Customer, on_delete=models.CASCADE, verbose_name=_('πελάτης'))
    # Technician may be unassigned.
    technician = models.ForeignKey(Technician, on_delete=models.CASCADE, null=True, blank=True, verbose_name=_('τεχνικός'))

    class Meta:
        verbose_name = _('ΠΡΟΙΟΝ')
        verbose_name_plural = _('ΠΡΟΙΟΝΤΑ')

    def __str__(self):
        return '{} {} ({}) - {}, ΕΠΙΣΚΕΥΑΣΤΗΚΕ {}'.format(self.make, self.model, self.product_type, self.customer, self.technician)
1946337 | from datetime import datetime, timezone
import json
import os
import sys
import pika
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import imports.broker as broker
import imports.db as db
from imports.logging import get_logger
import imports.requests
SPOTIFY_MARKET = os.environ["SPOTIFY_MARKET"]
READING_QUEUE_NAME = "artists"
WRITING_QUEUE_NAME = "albums"
MAX_RETRY_ATTEMPTS = 10
log = get_logger(os.path.basename(__file__))
def filter_album(album):
    """Decide whether a Spotify album record should be processed further.

    Keeps releases from 2021 onwards, excluding compilations and
    "Various Artists" records. Guard clauses keep the original
    short-circuit order of key accesses.
    """
    if album["release_date"] < "2021":
        return False
    if album["album_type"] == "compilation":
        return False
    return album["artists"][0]["name"] != "Various Artists"
def main():
    """Worker loop: consume artist messages, fetch each artist's Spotify
    discography, persist new albums, and forward filtered albums to the
    albums queue for further processing."""
    consume_channel = broker.create_channel(READING_QUEUE_NAME)
    publish_channel = broker.create_channel(WRITING_QUEUE_NAME)
    db_connection, cursor = db.init_connection()
    # Client-credentials auth; spotipy reads the credentials from the environment.
    sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials())

    def callback(ch, method, properties, body):
        """Handle received artist's data from the queue"""
        message = json.loads(body.decode())
        artist_id = message["spotify_id"]
        total_albums = message["total_albums"]
        artist_name = message["name"]
        log.info(
            "👨🏽🎤 Processing",
            name=artist_name,
            spotify_id=message["spotify_id"],
            object="artist",
        )
        # Retry the first page request; the API intermittently errors.
        attempts = 0
        while attempts < MAX_RETRY_ATTEMPTS:
            try:
                results = sp.artist_albums(
                    artist_id=artist_id,
                    album_type="album,single,appears_on",
                    offset=0,
                    limit=50,
                    country=SPOTIFY_MARKET,
                )
                log.info("Trying API request", attempt=attempts)
                break
            except Exception as e:
                attempts += 1
                log.exception(
                    "Unhandled exception", exception=e, attempt=attempts, exc_info=True
                )
        # NOTE(review): if every retry above failed, `results` is unbound here
        # and the next line raises NameError — confirm intended behaviour.
        if total_albums >= results["total"]:
            log.info("No new albums", spotify_id=artist_id, object="artist")
            ch.basic_ack(method.delivery_tag)
            return
        new_albums = results["total"] - total_albums
        log.info(
            "👨🏽🎤 New releases",
            spotify_id=artist_id,
            albums_count=new_albums,
            object="artist",
        )
        # We need to do make several requests since data is sorted by albumy type
        # and then by release date
        # An option would be to do separate requestes for `albums` and `singles`
        items = results["items"]
        while results["next"]:
            attempts = 0
            # attemps hack because API is returning random 404 errors
            while attempts < MAX_RETRY_ATTEMPTS:
                try:
                    results = sp.next(results)
                    log.info("Trying API request", attempt=attempts)
                    break
                except Exception as e:
                    attempts += 1
                    log.exception(
                        "Unhandled exception",
                        exception=e,
                        attempt=attempts,
                        exc_info=True,
                    )
            items.extend(results["items"])
        # Update total albums for the current artist
        # Even though we might not store all those albums
        # It is used for determining if there are new releases in Spotify's database
        try:
            cursor.execute(
                "UPDATE artists SET total_albums=%s, albums_updated_at=%s WHERE spotify_id=%s;",
                (results["total"], datetime.now(timezone.utc), artist_id),
            )
        except Exception as e:
            log.exception("Unhandled exception", exception=e, exc_info=True)
        else:
            log.info(
                "👨🏽🎤 Updated total albums",
                spotify_id=artist_id,
                total_albums=results["total"],
                prev_total_albums=total_albums,
                object="artist",
            )
        for i, item in enumerate(items):
            artists = []
            for artist in item["artists"]:
                artists.append(artist["name"])
            # Normalise partial release dates to a full YYYY-MM-DD value.
            release_date = item["release_date"]
            if "0000" == release_date:
                release_date = "0001"
            if item["release_date_precision"] == "year":
                release_date += "-01-01"
            elif item["release_date_precision"] == "month":
                release_date += "-01"
            try:
                cursor.execute(
                    "INSERT INTO albums (spotify_id, name, main_artist, all_artists, from_discography_of, album_group, album_type, release_date, release_date_precision, total_tracks, from_discography_of_spotify_id, main_artist_spotify_id) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT DO NOTHING;",
                    (
                        item["id"],
                        item["name"],
                        artists[0],
                        artists,
                        artist_name,
                        item["album_group"],
                        item["album_type"],
                        release_date,
                        item["release_date_precision"],
                        item["total_tracks"],
                        artist_id,
                        item["artists"][0]["id"],
                    ),
                )
            except Exception as e:
                log.exception("Unhandled exception", exception=e, exc_info=True)
            else:
                log.info(
                    "💿 Album " + ("saved" if cursor.rowcount else "exists"),
                    spotify_id=item["id"],
                    name=item["name"],
                    main_artist=artists[0],
                    object="album",
                )
            # Publish to queue only if it was added (which means it was not in the db yet)
            if cursor.rowcount:
                # Only publish (to get details) for albums that pass the test
                # Should this be here? Where should I filter this?
                if filter_album(item):
                    publish_channel.basic_publish(
                        exchange="",
                        routing_key=WRITING_QUEUE_NAME,
                        body=json.dumps(
                            {
                                "spotify_id": item["id"],
                                "album_name": item["name"],
                                "album_artist": artists[0],
                                "album_artist_spotify_id": item["artists"][0]["id"],
                            }
                        ),
                        properties=pika.BasicProperties(
                            delivery_mode=pika.spec.PERSISTENT_DELIVERY_MODE
                        ),
                    )
        # Acknowledge the artist message once all its albums are handled.
        ch.basic_ack(method.delivery_tag)

    # Process one message at a time per worker.
    consume_channel.basic_qos(prefetch_count=1)
    consume_channel.basic_consume(
        on_message_callback=callback, queue=READING_QUEUE_NAME
    )
    print(" [*] Waiting for messages. To exit press CTRL+C")
    # Blocks until consuming stops; cleanup runs afterwards.
    consume_channel.start_consuming()
    # Clean up and close connections
    broker.close_connection()
    db.close_connection(db_connection, cursor)
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("Interrupted")
        # Prefer a clean interpreter exit; fall back to a hard exit if
        # SystemExit is intercepted (e.g. during interpreter teardown).
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
| StarcoderdataPython |
9640098 | """Add tmdb_id column
Revision ID: 01602e6e9637
Revises: 050b415e6bc9
Create Date: 2020-01-09 07:38:13.146945
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '01602e6e9637'
down_revision = '050b415e6bc9'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add a nullable integer ``tmdb_id`` column to ``download``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('download', sa.Column('tmdb_id', sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``tmdb_id`` column from ``download``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('download', 'tmdb_id')
    # ### end Alembic commands ###
| StarcoderdataPython |
8035771 | import itertools
import os
import numpy as np
import pandas
import webbrowser
data_path = "../out-data/out_top1k_no_pca"
compared_files = [
"methods lof {'algorithm': 'ball_tree', 'contamination': 5e-05, 'n_neighbors': 2}.csv",
"methods lof {'algorithm': 'ball_tree', 'contamination': 5e-05, 'n_neighbors': 5}.csv",
"methods lof {'algorithm': 'ball_tree', 'contamination': 5e-05, 'n_neighbors': 10}.csv",
# "methods lof {'algorithm': 'kd_tree', 'contamination': 5e-05, 'n_neighbors': 2}.csv",
# "methods lof {'algorithm': 'kd_tree', 'contamination': 5e-05, 'n_neighbors': 5}.csv",
# "methods lof {'algorithm': 'kd_tree', 'contamination': 5e-05, 'n_neighbors': 10}.csv",
# "methods svm {'gamma': 0.1, 'kernel': 'poly', 'nu': 5e-05}.csv",
# "methods svm {'gamma': 0.1, 'kernel': 'rbf', 'nu': 5e-05}.csv",
]
report_path = "../out-data/seim_reports/conf_1-3_intersection.txt"
def signature_to_url_and_fname(signature):
    """Split an anomaly signature into a GitHub URL and a function name.

    Signatures look like ``repos/<account>__<repo>/<path>.kt:<function>``;
    the result is ``np.array([github_blob_url, function_name])``.
    """
    remainder = signature[6:]  # drop 'repos/'
    account, remainder = remainder.split('__', 1)
    repo_and_path, fun_name = remainder.split('.kt:', 1)
    repo, filepath = repo_and_path.split('/', 1)
    url = f"https://github.com/{account}/{repo}/blob/master/{filepath}.kt"
    return np.array([url, fun_name])
# Build the union of anomaly signatures (second CSV column) across all the
# compared result files, write them to the report, and open each in a browser.
sets = []
for filename in compared_files:
    data = pandas.read_csv(os.path.join(data_path, filename))
    data = np.array(data)
    # Column 1 holds the anomaly signature string.
    sets.append(set(data[:, 1]))
res = set.union(*sets)
out_info = np.array([signature_to_url_and_fname(entry) for entry in res])
# One (url, function) pair per double-newline-separated record.
np.savetxt(report_path, out_info, fmt='%s', delimiter='\n', newline='\n\n')
print(f"Total {len(res)} anomalies.")
print(f"Opening code in browser...")
for entry in out_info:
    webbrowser.open(entry[0], new=2)
| StarcoderdataPython |
143101 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This loads the user's _config.py file and provides a standardized interface
into it."""
import os
import sys
import urllib.parse
import re
from . import cache
from . import controller
from . import filter
from .cache import zf
zf.config = sys.modules['zeekofile.config']
__loaded = False
class UnknownConfigSectionException(Exception):
    """Signals an unknown configuration section (not raised within this file)."""
    pass
class ConfigNotFoundException(Exception):
    """Raised by init() when the given config file path does not exist."""
    pass
override_options = {}
site = cache.HierarchicalCache()
controllers = cache.HierarchicalCache()
filters = cache.HierarchicalCache()
def recompile():
    """Recompute derived config values after (re)loading configuration.

    Compiles ``site.file_ignore_patterns`` (strings become case-insensitive
    regexes; already-compiled patterns pass through) and refreshes
    ``blog.url`` from the site URL and the blog path.
    """
    global site
    site.compiled_file_ignore_patterns = []
    for p in site.file_ignore_patterns:
        if isinstance(p, str):
            site.compiled_file_ignore_patterns.append(
                re.compile(p, re.IGNORECASE))
        else:
            # Assumed to already be a compiled pattern; keep as-is.
            site.compiled_file_ignore_patterns.append(p)
    global blog
    # NOTE(review): `blog` is not defined in this module's visible code; it is
    # presumably injected by exec-ing the default config — confirm.
    blog.url = urllib.parse.urljoin(site.url, blog.path)
zeekofile_codebase = os.path.dirname(__file__)
default_path = os.path.join(zeekofile_codebase, "_default_config.py")
def __load_config(path=None):
    """Load configuration into this module's globals.

    Exec-based: the default config, then filters/controllers, then the
    user's config (if *path* given) are executed, their names copied into
    module globals, test overrides applied, and derived values recompiled.
    """
    # Strategy:
    # 1) Load the default config
    # 2) Load the filters and controllers
    # 3) Finally load the user's config.
    # This will ensure that we have good default values if the user's
    # config is missing something.
    exec(open(default_path).read(), globals(), locals())
    filter.preload_filters(
        directory=os.path.join(zeekofile_codebase, '_filters'))
    filter.preload_filters()
    controller.load_controllers(
        directory=os.path.join(zeekofile_codebase, '_controllers'))
    controller.load_controllers()
    if path:
        exec(open(path).read(), globals(), locals())
    # config is now in locals() but needs to be in globals()
    for k, v in locals().items():
        globals()[k] = v
    # Override any options (from unit tests)
    for k, v in override_options.items():
        if "." in k:
            # Dotted keys address a setting inside a HierarchicalCache,
            # e.g. "site.url" -> eval("site")["url"] = v.
            parts = k.split(".")
            cache_object = ".".join(parts[:-1])
            setting = parts[-1]
            cache_object = eval(cache_object)
            cache_object[setting] = v
        else:
            globals()[k] = v
    recompile()
    # NOTE(review): this binds a *local* __loaded, not the module-level flag
    # defined above; the module global stays False — confirm intent.
    __loaded = True
def init(config_file_path=None):
    """Initialise the configuration.

    With no (or a falsy) *config_file_path*, only the default config is
    loaded; otherwise the user's config is loaded on top of the defaults.
    Raises ConfigNotFoundException when the given path is not an existing
    file. Returns this module's name.
    """
    if not config_file_path:
        __load_config()
        return globals()['__name__']
    if not os.path.isfile(config_file_path):
        raise ConfigNotFoundException
    __load_config(config_file_path)
    return globals()['__name__']
| StarcoderdataPython |
1842297 | import os
import random
import string
from base64 import b64encode
from datetime import datetime, timedelta
from typing import List
import pytz
from firestore_ci import FirestoreDocument
from flask import request, url_for
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from config import Config, today
class Workshop(FirestoreDocument):
    """A workshop event with a signed, time-limited certificate URL."""

    def __init__(self):
        super().__init__()
        self.title: str = str()
        self.topic: str = str()
        self.date: datetime = today()
        self.time: str = str()
        self.instructor: str = str()
        self.venue: str = str()
        self.image_url: str = str()
        self.materials: List[str] = list()
        self.bg_color: int = 0xFFFFFF  # white
        # Random token protecting the certificate URL; empty until generated.
        self.signature: str = str()
        self.participants: int = 0
        self.url_expiration: datetime = today()

    @property
    def date_format(self) -> str:
        """Workshop date rendered in IST, e.g. "Mon, 01-Feb-21"."""
        return self.date.astimezone(pytz.timezone("Asia/Kolkata")).strftime("%a, %d-%b-%y")

    @property
    def valid_url(self) -> bool:
        """True when a signature exists and has not expired."""
        if not self.signature:
            return False
        if self.url_expiration < datetime.now(tz=pytz.UTC):
            return False
        return True

    @property
    def url(self) -> str:
        """Absolute certificate-preparation URL for the current request host."""
        domain: str = request.host_url
        reference: str = url_for('certificate_preparation', workshop_id=self.id, signature=self.signature)
        # Avoid a double slash when joining host and path.
        if domain[-1] == reference[0] == "/":
            return f"{domain[:-1]}{reference}"
        return f"{domain}{reference}"

    def generate_url(self) -> None:
        """Create (and persist) a fresh signature valid for 7 days, unless a
        valid one already exists."""
        if self.valid_url:
            return
        self.signature: str = "".join(random.choices(string.ascii_letters + string.digits, k=128))
        self.url_expiration: datetime = datetime.now(tz=pytz.UTC) + timedelta(days=7)
        self.save()

    def valid_signature(self, signature: str) -> bool:
        """True when *signature* matches the stored, still-valid signature."""
        return (self.valid_url and self.signature == signature)
Workshop.init()
class Participant(FirestoreDocument):
    """A workshop participant and their generated certificate."""

    def __init__(self):
        super().__init__()
        self.name: str = str()
        # Id of the Workshop document this participant belongs to.
        self.workshop_id: str = str()
        self.name_key: str = str()
        self.phone: str = str()
        self.certificate_pdf: str = str()
Participant.init()
class User(FirestoreDocument, UserMixin):
    """Admin user account with password login and token-based API auth."""

    def __init__(self):
        super().__init__()
        self.email: str = str()
        self.password_hash: str = str()
        self.token: str = str()
        # Timezone-aware expiry; every comparison below uses aware datetimes.
        self.token_expiration: datetime = datetime.now(tz=pytz.UTC)

    def __repr__(self):
        return self.email

    def get_id(self) -> str:
        """Flask-Login identity: the user's email address."""
        return self.email

    def get_token(self, expires_in=Config.TOKEN_EXPIRY) -> str:
        """Return the current API token, minting and persisting a new one when
        the existing token is missing or expires within 60 seconds."""
        now: datetime = datetime.now(tz=pytz.UTC)
        if self.token and self.token_expiration > now + timedelta(seconds=60):
            return self.token
        self.token: str = b64encode(os.urandom(24)).decode()
        self.token_expiration: datetime = now + timedelta(seconds=expires_in)
        self.save()
        return self.token

    def revoke_token(self) -> None:
        """Immediately invalidate the current token.

        BUG FIX: previously assigned naive ``datetime.utcnow()``, which made
        the next aware-vs-naive comparison in ``get_token`` raise TypeError;
        use an aware UTC timestamp like everywhere else in this class.
        """
        self.token_expiration: datetime = datetime.now(tz=pytz.UTC) - timedelta(seconds=1)
        self.save()

    def set_password(self, password) -> None:
        """Hash and store the password, then persist the document."""
        self.password_hash: str = generate_password_hash(password)
        self.save()

    def check_password(self, password) -> bool:
        return check_password_hash(self.password_hash, password)
User.init()
| StarcoderdataPython |
5063669 | """Tests for Ifcfg."""
import unittest
from nose.tools import ok_, eq_, raises
from nose import SkipTest
import ifcfg
from . import ifconfig_out
class IfcfgTestCase(unittest.TestCase):
    """Parser tests against canned ifconfig output for several platforms.

    NOTE(review): uses nose helpers (ok_, eq_, raises), which are
    unmaintained on modern Python — consider porting to plain unittest.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_ifcfg(self):
        # Live smoke test: the real host should expose at least one interface.
        interfaces = ifcfg.interfaces()
        res = len(interfaces) > 0
        ok_(res)

    @raises(ifcfg.exc.IfcfgParserError)
    def test_unknown(self):
        # An unrecognised distro/kernel pair must raise IfcfgParserError.
        parser = ifcfg.get_parser(distro='Bogus', kernel='55')

    def test_linux(self):
        parser = ifcfg.get_parser(distro='Linux', kernel='4',
                                  ifconfig=ifconfig_out.LINUX)
        interfaces = parser.interfaces
        eq_(interfaces['eth0']['ether'], '1a:2b:3c:4d:5e:6f')
        eq_(interfaces['eth0']['inet'], '192.168.0.1')
        eq_(interfaces['eth0']['broadcast'], '192.168.0.255')
        eq_(interfaces['eth0']['netmask'], '255.255.255.0')

    def test_linux2(self):
        parser = ifcfg.get_parser(distro='Linux', kernel='2.6',
                                  ifconfig=ifconfig_out.LINUX2)
        interfaces = parser.interfaces
        eq_(interfaces['eth0']['ether'], '1a:2b:3c:4d:5e:6f')
        eq_(interfaces['eth0']['inet'], '192.168.0.1')
        eq_(interfaces['eth0']['broadcast'], '192.168.0.255')
        eq_(interfaces['eth0']['netmask'], '255.255.255.0')

    def test_linux3(self):
        parser = ifcfg.get_parser(distro='Linux', kernel='3.3',
                                  ifconfig=ifconfig_out.LINUX3)
        interfaces = parser.interfaces
        eq_(interfaces['eth0']['ether'], '1a:2b:3c:4d:5e:6f')
        eq_(interfaces['eth0']['inet'], '192.168.0.1')
        eq_(interfaces['eth0']['broadcast'], '192.168.0.255')
        eq_(interfaces['eth0']['netmask'], '255.255.255.0')

    def test_macosx(self):
        parser = ifcfg.get_parser(distro='MacOSX', kernel='11.4.0',
                                  ifconfig=ifconfig_out.MACOSX)
        interfaces = parser.interfaces
        eq_(interfaces['en0']['ether'], '1a:2b:3c:4d:5e:6f')
        eq_(interfaces['en0']['inet'], '192.168.0.1')
        eq_(interfaces['en0']['broadcast'], '192.168.0.255')
        eq_(interfaces['en0']['netmask'], '255.255.255.0')

    def test_default_interface(self):
        # Live smoke test: a default interface should be detectable.
        res = ifcfg.default_interface()
        ok_(res)
class Languages:
    """Registry of supported language codes."""
    RU = 'ru'
    EN = 'en'
    # English is the fallback language.
    DEFAULT = EN
    LANGUAGES = [RU, EN]
11232808 | <reponame>siboles/pyCellAnalyst
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
__all__ = ['Image',
'Filters',
'FilteringPipeline',
'Helpers',
'RegionsOfInterest',
'Segmentation',
'Deformation',
'Visualization']
from pyCellAnalyst.Image import *
from pyCellAnalyst.Filters import *
from pyCellAnalyst.FilteringPipeline import *
from pyCellAnalyst.Helpers import *
from pyCellAnalyst.RegionsOfInterest import *
from pyCellAnalyst.Segmentation import *
from pyCellAnalyst.Deformation import *
from pyCellAnalyst.Visualization import *
| StarcoderdataPython |
1851709 | <reponame>s-tar/just-a-chat
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'mr.S'
from sqlalchemy import create_engine, Column, Integer, Sequence, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from kernel.config import config
from functools import wraps
import inspect
from bottle import request
Base = declarative_base()
engine = create_engine(config['db']['type']+"://"+config['db']['username']+":"+config['db']['password']
+ "@"+config['db']['host']+":"+str(config['db']['port'])+"/"+config['db']['db']
, encoding='utf8', echo=False, pool_recycle=3600)
Base.metadata.create_all(engine)
db = sessionmaker(bind=engine, autocommit=False, expire_on_commit=False)()
def new_connection():
    """Create and return a fresh SQLAlchemy session bound to the module engine."""
    return sessionmaker(bind=engine, autocommit=False, expire_on_commit=False)()
class Database(object):
    """Lazily-connected session facade.

    Attribute access falls through to the underlying SQLAlchemy session once
    a connection exists; calling the instance returns the session, or wraps
    a mapped entity class in an EntityWrapper.
    """

    def __init__(self):
        # The SQLAlchemy session; created on first __call__.
        self._connection = None

    def __call__(self, entity=None):
        """Return the session, or wrap a mapped entity class.

        - no argument: returns the (lazily created) session;
        - a class with ``__table__``: returns an EntityWrapper bound to the
          session;
        - any other truthy non-class value is returned unchanged;
        - a non-mapped class falls through to the session.
        """
        if not self._connection:
            self._connection = new_connection()
        if entity:
            if inspect.isclass(entity):
                if hasattr(entity, '__table__'):
                    return EntityWrapper(entity, self._connection)
            else:
                return entity
        return self._connection

    def __getattr__(self, name):
        """Delegate unknown attributes to the session.

        BUG FIX: the original raised KeyError when no connection existed,
        which breaks hasattr()/getattr() — the data model requires
        AttributeError. The original's instance-__dict__ probe was dead code
        (Python only calls __getattr__ after normal lookup fails) and was
        removed.
        """
        connection = self.__dict__.get('_connection')
        if connection is not None:
            return getattr(connection, name)
        raise AttributeError(name)

    def new_connection(self):
        """Replace the current session with a brand new one."""
        self._connection = sessionmaker(bind=engine, autocommit=False, expire_on_commit=False)()
class EntityWrapper:
    """Binds an entity class to a session, currying the connection into
    every callable attribute of the entity."""

    def __init__(self, entity, connection):
        self._entity = entity
        self._connection = connection

    def __getattr__(self, item):
        """Proxy attribute access to the wrapped entity.

        Callables come back wrapped so the bound connection is passed as
        their first argument; everything else is returned as-is.

        BUG FIX: the original tested ``if attr:`` after a None-defaulted
        getattr, so missing attributes *and* falsy values (0, '', False)
        silently returned None; missing attributes now raise AttributeError
        and falsy values are returned unchanged.
        """
        attr = getattr(self._entity, item)  # raises AttributeError if absent
        if callable(attr):
            @wraps(attr)
            def method(*args, **kwargs):
                return attr(self._connection, *args, **kwargs)
            return method
        return attr
11330410 | from .neuralnetwork import NeuralNetworkGenerator
from .supportvector import SupportVectorMachine
def generate_code(method='neuralnetwork', *args, **kwargs):
    """Generate a code using the selected method.

    :param method: 'neuralnetwork' or 'supportvector'
    :raises ValueError: for an unrecognised method (previously this left the
        generator unbound and crashed with a confusing NameError)
    :return: result of the generator's ``end()`` call
    """
    if method == 'neuralnetwork':
        code = NeuralNetworkGenerator(*args, **kwargs)
    elif method == 'supportvector':
        code = SupportVectorMachine(*args, **kwargs)
    else:
        raise ValueError("unknown code generation method: {!r}".format(method))
    return code.end()
| StarcoderdataPython |
3597917 | from django.shortcuts import render
from album.models import Album
def index(request, id, slug_do_album):
    """Render an album page with its track list.

    Looks the album up by both id and slug; a non-matching pair lets
    Album.DoesNotExist propagate (no 404 handling here).
    """
    album = Album.objects.get(id=id, slug=slug_do_album)
    musicas = album.musicas.all()
    return render(request, 'album/album.html', {'album': album,
                                                'musicas': musicas})
1996539 | <reponame>Sebrowsky/Mailpile<filename>mailpile/plugins/exporters.py
import mailbox
import os
import time
from gettext import gettext as _
import mailpile.config
from mailpile.plugins import PluginManager
from mailpile.util import *
from mailpile.commands import Command
from mailpile.mailutils import Email
_plugins = PluginManager(builtin=os.path.basename(__file__)[:-3])
##[ Configuration ]###########################################################
MAILBOX_FORMATS = ('mbox', 'maildir')
_plugins.register_config_variables('prefs', {
'export_format': ['Default format for exporting mail',
MAILBOX_FORMATS, 'mbox'],
})
##[ Commands ]################################################################
class ExportMail(Command):
    """Export messages to an external mailbox"""
    SYNOPSIS = (None, 'export', None, '<msgs> [flat] [<fmt>:<path>]')
    ORDER = ('Searching', 99)

    def export_path(self, mbox_type):
        """Return a default, timestamped export path for the mailbox type."""
        if mbox_type == 'mbox':
            return 'mailpile-%d.mbx' % time.time()
        else:
            # BUG FIX: the maildir branch returned the literal 'mailpile-%d'
            # (the % time.time() interpolation was missing), so every maildir
            # export collided on the same directory name.
            return 'mailpile-%d' % time.time()

    def create_mailbox(self, mbox_type, path):
        """Create an mbox or Maildir mailbox at *path*; raise on other types."""
        if mbox_type == 'mbox':
            return mailbox.mbox(path)
        elif mbox_type == 'maildir':
            return mailbox.Maildir(path)
        raise UsageError('Invalid mailbox type: %s' % mbox_type)

    def command(self, save=True):
        """Export the selected messages (and, unless 'flat', their whole
        conversations) to a new mailbox."""
        session, config, idx = self.session, self.session.config, self._idx()
        mbox_type = config.prefs.export_format
        args = list(self.args)
        # Optional trailing "<fmt>:<path>" argument overrides type and path.
        if args and ':' in args[-1]:
            mbox_type, path = args.pop(-1).split(':', 1)
        else:
            path = self.export_path(mbox_type)
        if args and args[-1] == 'flat':
            flat = True
            args.pop(-1)
        else:
            flat = False
        if os.path.exists(path):
            return self._error('Already exists: %s' % path)
        msg_idxs = list(self._choose_messages(args))
        if not msg_idxs:
            session.ui.warning('No messages selected')
            return False
        # Exporting messages without their threads barely makes any
        # sense.
        if not flat:
            for i in reversed(range(0, len(msg_idxs))):
                mi = msg_idxs[i]
                msg_idxs[i:i+1] = [int(m[idx.MSG_MID], 36)
                                   for m in idx.get_conversation(msg_idx=mi)]
        # Let's always export in the same order. Stability is nice.
        msg_idxs.sort()
        mbox = self.create_mailbox(mbox_type, path)
        exported = {}
        while msg_idxs:
            msg_idx = msg_idxs.pop(0)
            # Conversations may overlap; export each message only once.
            if msg_idx not in exported:
                e = Email(idx, msg_idx)
                session.ui.mark('Exporting =%s ...' % e.msg_mid())
                mbox.add(e.get_msg())
                exported[msg_idx] = 1
        mbox.flush()
        return self._success(
            _('Exported %d messages to %s') % (len(exported), path),
            {
                'exported': len(exported),
                'created': path
            })
_plugins.register_commands(ExportMail)
| StarcoderdataPython |
1607755 | import tkinter as tk
from tkinter import ttk
from PIL import ImageTk, Image
def main():
    """Show a window with an icon label and a text label side by side."""
    root = tk.Tk()
    root.title('LabelwithIcon')
    framettk = ttk.Frame(root)
    framettk.grid()
    # Loads 'pen.png' from the current working directory.
    icon = ImageTk.PhotoImage(Image.open('pen.png'))
    pen_label = ttk.Label(
        framettk,
        image=icon)
    pen_label.grid(row=1, column=1)
    s = tk.StringVar()
    s.set("Going Back To School")
    right_label = ttk.Label(
        framettk,
        textvariable=s,
        width=30,
        anchor=tk.W,
        padding=(30, 10))
    right_label.grid(row=1, column=2)
    # Blocks until the window is closed.
    root.mainloop()


if __name__ == '__main__':
    main()
96708 | <reponame>smadardan/HMM<gh_stars>0
"""
src.HMM
~~~~~~~~~
in this file you can find the utilities for viterbi and backward forward. this will help to take real example and
convert to hidden markov model input
"""
import random
class Hmm:
    """Hidden Markov Model defined by transition (a), emission (b) and
    initial (pi) probabilities, with named hidden states and observations."""

    def __init__(self, a, b, pi, obs_names, hidden_names, logger):
        self.a = a                      # transition matrix: a[i][j] = P(z_j | z_i)
        self.b = b                      # emission matrix: b[i][k] = P(x_k | z_i)
        self.pi = pi                    # initial hidden-state distribution
        self.obs_names = obs_names      # observation name per column of b
        self.hidden_names = hidden_names  # hidden-state name per row
        self.logger = logger

    def _sample_category(self, ps):
        """Sample an index from the categorical distribution *ps*.

        BUG FIX: the loop guard was ``i < len(ps)``, which could step to
        ``ps[len(ps)]`` (IndexError) when floating-point rounding left the
        cumulative sum below p; ``len(ps) - 1`` clamps to the last category
        and is identical for properly normalised distributions.
        """
        p = random.random()
        s = ps[0]
        i = 0
        while s < p and i < len(ps) - 1:
            i += 1
            s += ps[i]
        self.logger.debug('the samples: {}'.format(i))
        return i

    def convert_obs_names_to_nums(self, xs):
        """
        Convert observation names to their indices in ``obs_names``.

        :param xs: iterable of observation names
        :return: list of ints (0 for the name in position 0, and so on)
        """
        obs_nums = [self.obs_names.index(i) for i in xs]
        return obs_nums

    def convert_hidden_num_to_name(self, hidden):
        """
        Convert hidden-state indices back into their names.

        :param hidden: list of ints indexing ``hidden_names``
        :return: list of hidden-state names
        """
        hidden_outcome = []
        for val in hidden:
            hidden_outcome.append(self.hidden_names[val])
        return hidden_outcome

    def generate(self, size):
        """Sample a sequence of *size* steps from the model.

        :return: (observation names, hidden-state names, joint probability
            of the sampled sequence)
        """
        zs, xs = [], []
        seq_prob = 1.
        z_t = None
        for i in range(size):
            if i == 0:
                # First step: draw the initial hidden state from pi.
                z_t = self._sample_category(self.pi)
                seq_prob *= self.pi[z_t]
            else:
                # Subsequent steps: transition from the previous state.
                a_z_t = self.a[z_t]
                z_t = self._sample_category(a_z_t)
                seq_prob *= a_z_t[z_t]
            # Emit an observation from the current hidden state.
            x_t = self._sample_category(self.b[z_t])
            zs.append(self.hidden_names[z_t])
            xs.append(self.obs_names[x_t])
            seq_prob *= self.b[z_t][x_t]
        self.logger.debug('the generated data: \nxs: {}\nzs: {}\nseq_prob: {}'.format(xs, zs, seq_prob))
        return xs, zs, seq_prob
| StarcoderdataPython |
381570 | import sys
def fizzbuzz(a, b, n):
    """Return the FizzBuzz string for 1..n with divisors a ("F") and b ("B").

    Multiples of both a and b become "FB"; everything else is the number
    itself. Tokens are space-separated.
    """
    out = []
    for i in range(1, n + 1):  # BUG FIX: xrange is Python 2-only; range works in both.
        if i % a == 0 and i % b == 0:
            out.append("FB")
        elif i % a == 0:
            out.append("F")
        elif i % b == 0:
            out.append("B")
        else:
            out.append(str(i))
    return " ".join(out)
# Script entry: reads one test case per line ("a b n") from the file named
# by argv[1] and prints each FizzBuzz result.
# NOTE(review): `print fizzbuzz(...)` is Python 2 statement syntax; this
# script only runs under Python 2 as written.
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
    a, b, n = map(int, test.split())
    print fizzbuzz(a, b, n)
test_cases.close()
3250566 | import sys
from util.ODXTServer import ODXTServer, ODXTServerV2, serverReqHandler, serverReqHandlerV2
HOST = 'localhost'
PORT = 50057
if __name__ == "__main__":
    # Usage: <script> <host> <port> — overrides the module defaults above.
    HOST = sys.argv[1]
    PORT = int(sys.argv[2])
    server = ODXTServerV2((HOST, PORT), serverReqHandlerV2)
    # Blocks forever, handling requests until interrupted.
    server.serve_forever()
| StarcoderdataPython |
6672360 | #!/usr/bin/env python3
import sys
# Congestion-state labels appended as the last column of each output line.
NON_CONGES = '0'
INCR_CONGES = '1'
MAX_CONGES = '2'
DECR_CONGES = '3'
# Bin width for grouping samples by timestamp (same units as the input
# timestamps — presumably seconds; confirm against the data source).
BIN_WINDOW = 0.0006
# Delay thresholds used to classify each bin.
MEDIAN_VAL = 0.00103
ACCEPTABLE_VARIANCE = 0.000200
MIN_VAL = 0.0005
if __name__ == "__main__":
    fname = sys.argv[1]
    # Assume lines have format: timestamp delay
    with open(fname, 'r') as infile:
        with open(fname+"_binned", 'w') as outfile:
            state = NON_CONGES
            prev_time = 0
            sample_sum = 0
            num_drops = 0
            line_set = []
            for line in infile:
                val = float(line.split()[1])
                time_stamp = float(line.split()[0])
                line_set.append(line.strip())
                sample_sum += val
                # Delays below MIN_VAL are counted as drops.
                if val < MIN_VAL:
                    num_drops += 1
                # Close the current bin once the window has elapsed.
                if time_stamp - prev_time > BIN_WINDOW:
                    temp = sample_sum / len(line_set)
                    # More than a third of the bin dropped -> max congestion.
                    if num_drops > len(line_set) / 3:
                        state = MAX_CONGES
                    elif temp > MEDIAN_VAL + ACCEPTABLE_VARIANCE:
                        if (state == NON_CONGES or state == INCR_CONGES):
                            #Last state was uncongested, so congestion is increasing
                            state = INCR_CONGES
                        elif (state == MAX_CONGES or state == DECR_CONGES):
                            #Last state was dropping packets, so conges decreasing
                            state = DECR_CONGES
                    else:
                        state = NON_CONGES
                    # Emit every buffered line tagged with the bin's state.
                    for l in line_set:
                        outfile.write(l + " " + state + '\n')
                    line_set = []
                    prev_time = time_stamp
                    sample_sum = 0
                    num_drops = 0
            # Flush the final, partial bin with the last computed state.
            for l in line_set:
                outfile.write(l + " " + state + '\n')
| StarcoderdataPython |
358510 | class _Snake:
def __init__(self, color):
self.head = ()
self.body = []
self.color = color
self.last_op = 'w'
self.score = 0
self.operation = {
'w': lambda x, y: (x - 1, y),
'a': lambda x, y: (x, y - 1),
's': lambda x, y: (x + 1, y),
'd': lambda x, y: (x, y + 1),
}
def move(self, key, field_size, apple):
if key not in 'wasd':
key = self.last_op
new_head = self.operation[key](*self.head)
# snake can not go back
if self.body and new_head == self.body[0]:
key = self.last_op
new_head = self.operation[key](*self.head)
# snake grows from eating apple
if new_head == apple:
self.body = [self.head] + self.body
self.score += 1
elif self.body:
self.body = [self.head] + self.body[:-1]
# snake dies from boundaries
if not (0 <= new_head[0] < field_size and 0 <= new_head[1] < field_size):
print(f"{self} is dead")
return False
# snake dies from itself
if new_head in self.body:
print(f"{self} is dead")
return False
self.head = new_head
self.last_op = key
return True
def attack(self, other):
"""
Rule: Snake that crashed into another snake - attacker. Longer snake will survive, but will suffer damage
:param other: snake-defender
:return: True for survived, False for dead
"""
delta = len(self.body) - len(other.body)
if delta > 0:
self.body = self.body[:delta]
return True, False
if delta < 0:
other.body = other.body[:-delta]
print(other.body)
return False, True
return False, False
def __eq__(self, other):
return self.color == other
def __repr__(self):
return f'Snake {self.color}'
| StarcoderdataPython |
3509563 | <gh_stars>0
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
import six
from mistral.service import coordination
from mistral.tests.unit import base
class ServiceCoordinatorTest(base.BaseTest):
    """ServiceCoordinator lifecycle and group-membership tests.

    Uses the in-memory 'zake://' tooz backend where a backend is needed;
    a None backend_url exercises the inactive-coordinator paths.

    NOTE(review): assertItemsEqual is Python 2-only unittest API (renamed
    assertCountEqual in Python 3) — confirm base.BaseTest provides an alias.
    """

    def setUp(self):
        super(ServiceCoordinatorTest, self).setUp()

    def test_start(self):
        cfg.CONF.set_default(
            'backend_url',
            'zake://',
            'coordination'
        )
        coordinator = coordination.ServiceCoordinator('fake_id')
        coordinator.start()
        self.assertTrue(coordinator.is_active())

    def test_start_without_backend(self):
        # No backend configured -> coordinator starts but stays inactive.
        cfg.CONF.set_default('backend_url', None, 'coordination')
        coordinator = coordination.ServiceCoordinator()
        coordinator.start()
        self.assertFalse(coordinator.is_active())

    def test_stop_not_active(self):
        cfg.CONF.set_default('backend_url', None, 'coordination')
        coordinator = coordination.ServiceCoordinator()
        coordinator.start()
        coordinator.stop()
        self.assertFalse(coordinator.is_active())

    def test_stop(self):
        cfg.CONF.set_default(
            'backend_url',
            'zake://',
            'coordination'
        )
        coordinator = coordination.ServiceCoordinator()
        coordinator.start()
        coordinator.stop()
        self.assertFalse(coordinator.is_active())

    def test_join_group_not_active(self):
        # Joining while inactive is a no-op: no members are recorded.
        cfg.CONF.set_default('backend_url', None, 'coordination')
        coordinator = coordination.ServiceCoordinator()
        coordinator.start()
        coordinator.join_group('fake_group')
        members = coordinator.get_members('fake_group')
        self.assertFalse(coordinator.is_active())
        self.assertEqual(0, len(members))

    def test_join_group_and_get_members(self):
        cfg.CONF.set_default(
            'backend_url',
            'zake://',
            'coordination'
        )
        coordinator = coordination.ServiceCoordinator(my_id='fake_id')
        coordinator.start()
        coordinator.join_group('fake_group')
        members = coordinator.get_members('fake_group')
        self.assertEqual(1, len(members))
        self.assertItemsEqual((six.b('fake_id'),), members)

    def test_join_group_and_leave_group(self):
        cfg.CONF.set_default(
            'backend_url',
            'zake://',
            'coordination'
        )
        coordinator = coordination.ServiceCoordinator(my_id='fake_id')
        coordinator.start()
        coordinator.join_group('fake_group')
        members_before = coordinator.get_members('fake_group')
        coordinator.leave_group('fake_group')
        members_after = coordinator.get_members('fake_group')
        self.assertEqual(1, len(members_before))
        self.assertEqual(set([six.b('fake_id')]), members_before)
        self.assertEqual(0, len(members_after))
        self.assertEqual(set([]), members_after)
class ServiceTest(base.BaseTest):
    """Tests for the higher-level Service wrapper around the coordinator."""

    def setUp(self):
        super(ServiceTest, self).setUp()
        # Re-initialize the global service coordinator object, in order to use
        # new coordination configuration.
        coordination.cleanup_service_coordinator()

    @mock.patch('mistral.utils.get_process_identifier', return_value='fake_id')
    def test_register_membership(self, mock_get_identifier):
        cfg.CONF.set_default('backend_url', 'zake://', 'coordination')
        srv = coordination.Service('fake_group')
        srv.register_membership()
        # Ensure the service is stopped even if assertions below fail.
        self.addCleanup(srv.stop)
        srv_coordinator = coordination.get_service_coordinator()
        self.assertIsNotNone(srv_coordinator)
        self.assertTrue(srv_coordinator.is_active())
        members = srv_coordinator.get_members('fake_group')
        # The process identifier is used as the member id.
        mock_get_identifier.assert_called_once_with()
        self.assertEqual(set([six.b('fake_id')]), members)
| StarcoderdataPython |
78451 | <reponame>aspose-cells-cloud/aspose-cells-cloud-python<filename>test/test_cells_clear_object_api.py
# coding: utf-8
from __future__ import absolute_import
import os
import sys
import unittest
import warnings
# Make the package importable when the tests are run from the repo checkout.
ABSPATH = os.path.abspath(os.path.realpath(os.path.dirname(__file__)) + "/..")
sys.path.append(ABSPATH)
import asposecellscloud
from asposecellscloud.rest import ApiException
from asposecellscloud.apis.lite_cells_api import LiteCellsApi

# Shared LiteCellsApi instance, created lazily by the first test's setUp().
import AuthUtil
global_api = None
class TestCellsClearObjectApi(unittest.TestCase):
    """ CellsApi unit test stubs """

    def setUp(self):
        # The underlying HTTP client leaks ResourceWarnings; silence them.
        warnings.simplefilter('ignore', ResourceWarning)
        global global_api
        if global_api is None:
            # Create the API client once and share it across all tests.
            global_api = asposecellscloud.apis.lite_cells_api.LiteCellsApi(
                AuthUtil.GetClientId(), AuthUtil.GetClientSecret(),
                "v3.0", AuthUtil.GetBaseUrl())
        self.api = global_api

    def tearDown(self):
        pass

    def _clear_objects(self, object_type):
        """Upload the two sample workbooks and clear ``object_type`` objects.

        Deduplicates the path-building boilerplate that was previously
        copy-pasted into every test method.
        """
        test_data = os.path.dirname(os.path.realpath(__file__)) + "/../TestData/"
        files = {
            name: test_data + name
            for name in ("Book1.xlsx", "myDocument.xlsx")
        }
        return self.api.post_clear_objects(files, object_type)

    def test_cells_clear_chart(self):
        self._clear_objects("chart")

    def test_cells_clear_comment(self):
        self._clear_objects("comment")

    def test_cells_clear_picture(self):
        self._clear_objects("picture")

    def test_cells_clear_shape(self):
        self._clear_objects("shape")
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
11317224 | # 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
class NatSrcPool(Resource):
    """
    [edit security nat source pool <name>]

    Resource for managing a Junos source-NAT address pool. The pool is
    identified by its name and holds a single from/to address range.
    """
    PROPERTIES = [
        'addr_from',
        'addr_to'
    ]

    def _xml_at_top(self):
        """
        configuration to retrieve resource
        """
        return E.security(E.nat(E.source(E.pool(E.name(self.name)))))

    # -----------------------------------------------------------------------
    # XML read
    # -----------------------------------------------------------------------

    def _xml_at_res(self, xml):
        """
        return Element at resource
        """
        return xml.find('.//pool')

    def _xml_to_py(self, as_xml, to_py):
        """
        converts Junos XML to native Python
        """
        Resource._r_has_xml_status(as_xml, to_py)
        # The pool's range is stored as <address><name>from</name>
        # <to><ipaddr>to</ipaddr></to></address> in the Junos config.
        to_py['addr_from'] = as_xml.find('address/name').text
        to_py['addr_to'] = as_xml.find('address/to/ipaddr').text

    # -----------------------------------------------------------------------
    # XML property writers
    # -----------------------------------------------------------------------

    def _xml_change_addr_from(self, xml):
        # we need to always set the address/name given the structure of the
        # Junos configuration XML, derp.
        addr_from = self.should.get('addr_from') or self.has.get('addr_from')
        xml.append(E.address(JXML.REPLACE, E.name(addr_from)))
        return True

    def _xml_change_addr_to(self, xml):
        # we must always include the addr_from, so if we didn't expliclity
        # change it, we must do it now.
        if 'addr_from' not in self.should:
            self._xml_change_addr_from(xml)
        # Append the <to> element under the <address> node created above.
        x_addr = xml.find('address')
        x_addr.append(E.to(E.ipaddr(self.should['addr_to'])))
        return True

    # -----------------------------------------------------------------------
    # Resource List, Catalog
    # -- only executed by 'manager' resources
    # -----------------------------------------------------------------------

    def _r_list(self):
        """
        build the policy context resource list from the command~
        > show security policies zone-context
        """
        get = E.security(E.nat(E.source(
            E.pool(JXML.NAMES_ONLY))))
        got = self.D.rpc.get_config(get)
        self._rlist = [name.text for name in got.xpath('.//pool/name')]

    def _r_catalog(self):
        # Fetch every pool and convert each into its Python representation.
        get = E.security(E.nat(E.source(E.pool)))
        got = self.D.rpc.get_config(get)
        for pool in got.xpath('.//pool'):
            name = pool.find('name').text
            self._rcatalog[name] = {}
            self._xml_to_py(pool, self._rcatalog[name])
| StarcoderdataPython |
import extend_chain_operation
from create_dummy_chain import build_chain
import os

# Chain #1 is the one already built by the extend_chain_operation example.
chain1 = extend_chain_operation.chain

# Classifier definition files for chain #2, rooted at the FCM install dir.
classifiers = [os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers', 'sota_models_cifar10-32-dev_validation',
                            'V001_ResNet34_ref_0'),
               os.path.join(os.environ['FCM'], 'Definitions', 'Classifiers', 'sota_models_cifar10-32-dev_validation',
                            'V001_VGG19_ref_0')
               ]
classifiers_id = ['ResNet34', 'VGG19']
thresholds = [0.9]
trigger_ids = ['trigger_classifier_0.8_ResNet34']
chain2 = build_chain(classifiers, classifiers_id, thresholds, trigger_ids, 'chain2_extend_operation')

from Source.genetic_algorithm.operations_breed import merge_two_chains

# Merge the two chains and assign the result a fresh system id.
merged = merge_two_chains(chain1, chain2)
merged.set_sysid(extend_chain_operation.generate_system_id(merged))
print(merged.get_sysid())

from Source.system_evaluator import evaluate
from Source.system_evaluator_utils import pretty_print

# Evaluate the merged chain starting from its entry component and print a report.
R = evaluate(merged, merged.get_start())
pretty_print(R)
| StarcoderdataPython |
1919653 | #
# Code under the MIT license by <NAME>
#
from mc import *
import time
import os
mc = Minecraft()
bridge = []
while True:
pos = mc.player.getTilePos()
pos.y = pos.y - 1
belowBlock = mc.getBlock(pos)
if belowBlock == AIR.id or belowBlock == WATER_FLOWING.id or belowBlock == WATER_STATIONARY.id:
bridge.append(pos)
mc.setBlock(pos, STAINED_GLASS_BLUE)
if len(bridge) > 10:
firstPos = bridge.pop(0)
if not firstPos in bridge:
mc.setBlock(firstPos, AIR)
time.sleep(0.05)
| StarcoderdataPython |
9605694 | <filename>src/p4pktgen/core/strategy.py<gh_stars>0
from collections import OrderedDict, defaultdict
import json
import time
import logging
from random import shuffle
import operator
import random
from enum import Enum
from p4pktgen.config import Config
from p4pktgen.core.translator import TestPathResult
from p4pktgen.util.graph import GraphVisitor, VisitResult
from p4pktgen.util.statistics import Statistics
from p4pktgen.hlir.transition import ActionTransition, ParserTransition
class ParserGraphVisitor(GraphVisitor):
    """Graph visitor that enumerates all feasible parser paths.

    Paths that would extract more headers into a header stack than the
    stack can hold are pruned during edge preprocessing.
    """

    def __init__(self, hlir):
        super(ParserGraphVisitor, self).__init__()
        self.hlir = hlir
        # Accumulates every complete parser path found during the visit.
        self.all_paths = []

    def count(self, stack_counts, state_name):
        # Tally how many extracts each header stack receives in this state.
        if state_name != 'sink':
            state = self.hlir.get_parser_state(state_name)
            for extract in state.header_stack_extracts:
                stack_counts[extract] += 1

    def preprocess_edges(self, path, edges):
        filtered_edges = []
        for edge in edges:
            if edge.dst != 'sink' and isinstance(edge, ParserTransition):
                state = self.hlir.get_parser_state(edge.dst)
                if state.has_header_stack_extracts():
                    # Count stack extracts along the whole candidate path
                    # (existing path plus this edge's destination).
                    stack_counts = defaultdict(int)
                    if len(path) > 0:
                        self.count(stack_counts, path[0].src)
                    for e in path:
                        self.count(stack_counts, e.dst)
                    self.count(stack_counts, edge.dst)
                    # If one of the header stacks is overful, remove the edge
                    valid = True
                    for stack, count in stack_counts.items():
                        if self.hlir.get_header_stack(stack).size < count:
                            valid = False
                            break
                    if not valid:
                        continue
            filtered_edges.append(edge)
        return filtered_edges

    def visit(self, path, is_complete_path):
        # Record only complete parser paths; always keep exploring.
        if is_complete_path:
            self.all_paths.append(path)
        return VisitResult.CONTINUE

    def backtrack(self):
        pass
class PathCoverageGraphVisitor(GraphVisitor):
    """Visitor that generates test cases for every control path explored.

    For each control path it asks the translator to solve the path
    constraints, records statistics and results, and writes the resulting
    test case (packet) out through ``test_case_writer``.
    """

    def __init__(self, translator, parser_path, source_info_to_node_name,
                 results, test_case_writer):
        super(PathCoverageGraphVisitor, self).__init__()
        self.translator = translator
        # The fixed parser path this traversal of the control graph assumes.
        self.parser_path = parser_path
        self.source_info_to_node_name = source_info_to_node_name
        self.path_count = 0
        self.results = results
        self.test_case_writer = test_case_writer
        # Per-traversal result histogram (reset for each parser path).
        self.stats_per_traversal = defaultdict(int)

    def preprocess_edges(self, path, edges):
        # No pruning or reordering in the base strategy.
        return edges

    def visit(self, control_path, is_complete_control_path):
        self.path_count += 1
        # Push a solver scope so constraints can be popped on backtrack().
        self.translator.push()
        expected_path, result, test_case, packet_lst = \
            self.translator.generate_constraints(
                self.parser_path, control_path,
                self.source_info_to_node_name, self.path_count, is_complete_control_path)
        if result == TestPathResult.SUCCESS and is_complete_control_path:
            Statistics().avg_full_path_len.record(
                len(self.parser_path + control_path))
            if not Config().get_try_least_used_branches_first():
                for e in control_path:
                    if Statistics().stats_per_control_path_edge[e] == 0:
                        # First time this edge appears on a successful path.
                        Statistics().num_covered_edges += 1
                    Statistics().stats_per_control_path_edge[e] += 1
        if result == TestPathResult.NO_PACKET_FOUND:
            Statistics().avg_unsat_path_len.record(
                len(self.parser_path + control_path))
            Statistics().count_unsat_paths.inc()
        if Config().get_record_statistics():
            Statistics().record(result, is_complete_control_path, self.translator)
        record_result = (is_complete_control_path
                         or (result != TestPathResult.SUCCESS))
        if record_result:
            # Doing file writing here enables getting at least
            # some test case output data for p4pktgen runs that
            # the user kills before it completes, e.g. because it
            # takes too long to complete.
            self.test_case_writer.write(test_case, packet_lst)
        result_path = [n.src for n in self.parser_path
                       ] + ['sink'] + [(n.src, n) for n in control_path]
        result_path_tuple = tuple(expected_path)
        if result_path_tuple in self.results and self.results[result_path_tuple] != result:
            logging.error("result_path %s with result %s"
                          " is already recorded in results"
                          " while trying to record different result %s"
                          "" % (result_path,
                                self.results[result_path_tuple], result))
            #assert False
        self.results[tuple(result_path)] = result
        if result == TestPathResult.SUCCESS and is_complete_control_path:
            for x in control_path:
                Statistics().stats_per_control_path_edge[x] += 1
            now = time.time()
            # Use real time to avoid printing these details
            # too often in the output log.
            if now - Statistics(
                    ).last_time_printed_stats_per_control_path_edge >= 30:
                Statistics().log_control_path_stats(
                    Statistics().stats_per_control_path_edge,
                    Statistics().num_control_path_edges)
                Statistics(
                    ).last_time_printed_stats_per_control_path_edge = now
        Statistics().stats[result] += 1
        self.stats_per_traversal[result] += 1
        visit_result = None
        tmp_num = Config().get_max_paths_per_parser_path()
        if (tmp_num
                and self.stats_per_traversal[TestPathResult.SUCCESS] >= tmp_num):
            # logging.info("Already found %d packets for parser path %d of %d."
            #             " Backing off so we can get to next parser path ASAP"
            #             "" % (self.stats_per_traversal[TestPathResult.SUCCESS],
            #                   parser_path_num, len(parser_paths)))
            visit_result = VisitResult.BACKTRACK
        else:
            visit_result = VisitResult.CONTINUE if result == TestPathResult.SUCCESS else VisitResult.BACKTRACK
        if is_complete_control_path and result == TestPathResult.SUCCESS:
            Statistics().num_test_cases += 1
            if Statistics().num_test_cases == Config().get_num_test_cases():
                # Requested number of test cases reached: stop the traversal.
                visit_result = VisitResult.ABORT
        return visit_result

    def backtrack(self):
        # Drop the solver scope pushed in visit().
        self.translator.pop()
# Per-edge coverage state used by EdgeCoverageGraphVisitor:
# UNVISITED -> VISITED (covered by a successful complete path) -> DONE.
EdgeLabels = Enum('EdgeLabels', 'UNVISITED VISITED DONE')
class EdgeCoverageGraphVisitor(PathCoverageGraphVisitor):
    """Path visitor that additionally tracks per-edge coverage labels.

    NOTE(review): this class contains several apparently disabled or dead
    code fragments (statements after ``return`` and triple-quoted blocks);
    they are preserved below, marked where found.
    """

    def __init__(self, graph, labels, translator, parser_path, source_info_to_node_name,
                 results, test_case_writer):
        super(EdgeCoverageGraphVisitor, self).__init__(translator, parser_path, source_info_to_node_name, results, test_case_writer)
        self.graph = graph
        # Shared dict mapping each control-path edge to an EdgeLabels value.
        self.labels = labels
        self.ccc = 0

    def preprocess_edges(self, path, edges):
        if Config().get_random_tlubf():
            shuffle(edges)
            return edges
        # Visit least-used edges first (reversed ascending sort).
        custom_order = sorted(
            edges, key=lambda t: Statistics().stats_per_control_path_edge[t])
        return reversed(custom_order)
        # NOTE(review): the remainder of this method is unreachable dead
        # code (it follows an unconditional return) — apparently an
        # alternative label-based ordering kept for experimentation.
        visited_es = []
        unvisited_es = []
        path_has_new_edges = False
        for e in path:
            if self.labels[e] == EdgeLabels.UNVISITED:
                path_has_new_edges = True
                break
        for e in edges:
            label = self.labels[e]
            if label == EdgeLabels.UNVISITED:
                unvisited_es.append(e)
            elif label == EdgeLabels.VISITED:
                visited_es.append(e)
            else:
                assert label == EdgeLabels.DONE
                if path_has_new_edges:
                    visited_es.append(e)
        # shuffle(visited_es)
        #shuffle(unvisited_es)
        return list(reversed(visited_es)) + list(reversed(unvisited_es))

    def visit(self, control_path, is_complete_control_path):
        visit_result = super(EdgeCoverageGraphVisitor, self).visit(control_path, is_complete_control_path)
        if visit_result == VisitResult.CONTINUE and is_complete_control_path:
            # A successful complete path covers all of its edges; walk it
            # backwards, promoting labels UNVISITED -> VISITED -> DONE.
            is_done = True
            for e in reversed(control_path):
                label = self.labels[e]
                if label == EdgeLabels.UNVISITED:
                    Statistics().num_covered_edges += 1
                    self.labels[e] = EdgeLabels.VISITED
                if is_done and label != EdgeLabels.DONE:
                    # An edge is DONE once every outgoing edge of its
                    # destination node is DONE.
                    all_out_es_done = True
                    for oe in self.graph.get_neighbors(e.dst):
                        if self.labels[oe] != EdgeLabels.DONE:
                            all_out_es_done = False
                            break
                    if all_out_es_done:
                        for ie in self.graph.get_in_edges(e.dst):
                            if self.labels[ie] == EdgeLabels.VISITED:
                                Statistics().num_done += 1
                                self.labels[ie] = EdgeLabels.DONE
                    else:
                        is_done = False
            # NOTE(review): as reconstructed, the traversal aborts after the
            # first successful complete path — confirm this dump/abort was
            # not meant to be guarded by a coverage-completion check.
            Statistics().dump()
            print(len(set(self.labels.keys())))
            visit_result = VisitResult.ABORT
        # NOTE(review): the two string literals below are no-op disabled
        # code blocks preserved from the original.
        """
        c = 0
        for k, v in self.labels.items():
            if v == EdgeLabels.UNVISITED:
                print(k)
                c += 1
                if c == 10:
                    break
        self.ccc = 0
        """
        """
        if visit_result == VisitResult.CONTINUE and not is_complete_control_path:
            path_has_new_edges = False
            for e in control_path:
                if self.labels[e] == EdgeLabels.UNVISITED:
                    path_has_new_edges = True
                    break
            if path_has_new_edges:
                self.ccc = 0
        if visit_result == VisitResult.BACKTRACK:
            self.ccc += 1
            if self.ccc == 100:
                visit_result = VisitResult.ABORT
        """
        return visit_result
class LeastUsedPaths:
    """Repeatedly walks the graph, always taking the least-used edge.

    Generates paths until every control-path edge has been covered at
    least once, handing each path to ``visitor``.
    """

    def __init__(self, hlir, graph, start, visitor):
        self.graph = graph
        # Number of generated paths each edge has appeared on.
        self.path_count = defaultdict(int)
        self.start = start
        self.visitor = visitor
        self.hlir = hlir

    def choose_edge(self, edges):
        if Config().get_random_tlubf():
            return random.choice(edges)
        # Pick the edge that has been used on the fewest paths so far.
        edge_counts = [self.path_count[e] for e in edges]
        min_index, min_value = min(enumerate(edge_counts), key=operator.itemgetter(1))
        return edges[min_index]

    def count(self, stack_counts, state_name):
        # Tally how many extracts each header stack receives in this state.
        if state_name != 'sink':
            state = self.hlir.get_parser_state(state_name)
            for extract in state.header_stack_extracts:
                stack_counts[extract] += 1

    def preprocess_edges(self, path, edges):
        # Same header-stack overflow pruning as ParserGraphVisitor.
        filtered_edges = []
        for edge in edges:
            if edge.dst != 'sink' and isinstance(edge, ParserTransition):
                state = self.hlir.get_parser_state(edge.dst)
                if state.has_header_stack_extracts():
                    stack_counts = defaultdict(int)
                    if len(path) > 0:
                        self.count(stack_counts, path[0].src)
                    for e in path:
                        self.count(stack_counts, e.dst)
                    self.count(stack_counts, edge.dst)
                    # If one of the header stacks is overful, remove the edge
                    valid = True
                    for stack, count in stack_counts.items():
                        if self.hlir.get_header_stack(stack).size < count:
                            valid = False
                            break
                    if not valid:
                        continue
            filtered_edges.append(edge)
        return filtered_edges

    def visit(self):
        # Keep generating paths until all edges have been covered.
        while Statistics().num_covered_edges < Statistics().num_control_path_edges:
            path = []
            next_node = self.start
            # Greedily extend the path until a node with no successors.
            while self.graph.get_neighbors(next_node):
                edges = self.graph.get_neighbors(next_node)
                if len(edges) == 0:
                    break
                edges = self.preprocess_edges(path, edges)
                edge = self.choose_edge(edges)
                path.append(edge)
                next_node = edge.dst
            for e in path:
                if self.path_count[e] == 0:
                    # Edge covered for the first time.
                    Statistics().num_covered_edges += 1
                self.path_count[e] += 1
            self.visitor.visit(path)
class TLUBFParserVisitor:
    """Per-parser-path driver for the try-least-used-branches-first strategy.

    For each satisfiable parser path, traverses all control paths of the
    ingress pipeline with an EdgeCoverageGraphVisitor.
    """

    def __init__(self, graph, labels, translator, source_info_to_node_name, results, test_case_writer, in_pipeline):
        self.graph = graph
        # Shared edge-label dict, reused across parser paths so coverage
        # accumulates globally.
        self.labels = labels
        self.translator = translator
        self.source_info_to_node_name = source_info_to_node_name
        self.results = results
        self.test_case_writer = test_case_writer
        self.in_pipeline = in_pipeline

    def visit(self, parser_path):
        print("VISIT", parser_path)
        if not self.translator.generate_parser_constraints(parser_path):
            # Skip unsatisfiable parser paths
            return
        graph_visitor = EdgeCoverageGraphVisitor(self.graph, self.labels, self.translator, parser_path,
                                                 self.source_info_to_node_name,
                                                 self.results, self.test_case_writer)
        self.graph.visit_all_paths(self.in_pipeline.init_table_name, None, graph_visitor)
3486572 | <reponame>jackadamson/facebooklogger
from pymessenger.bot import Bot
import logging
from facebooklogger.settings import PAGE_ACCESS_TOKEN, FB_USER_ID
class FacebookLogger(logging.Handler):
    """Logging handler that delivers each record as a Facebook message.

    Records are formatted by FacebookLoggerFormatter and sent to *user_id*
    via the page identified by *access_token*.
    """

    def __init__(
        self, user_id=FB_USER_ID, access_token=PAGE_ACCESS_TOKEN, level=logging.NOTSET
    ):
        if access_token is None:
            raise ValueError(
                "access_token not provided, set the PAGE_ACCESS_TOKEN environment variable"
            )
        # BUG FIX: this check previously re-tested ``access_token`` (copy-paste
        # error), so a missing user id was never caught here. It also replaces
        # the later redundant module-level FB_USER_ID check, which raised even
        # when a user_id was passed explicitly.
        if user_id is None:
            raise ValueError(
                "user_id not provided, set the FB_USER_ID environment variable"
            )
        # Initialize the Handler exactly once (was previously called twice).
        logging.Handler.__init__(self, level)
        self.setFormatter(FacebookLoggerFormatter())
        self.user_id = user_id
        self.bot = Bot(access_token)

    def emit(self, record: logging.LogRecord):
        """Send the formatted record to the configured Facebook user."""
        self.bot.send_text_message(self.user_id, self.format(record))
class FacebookLoggerFormatter(logging.Formatter):
    """Formatter that prefixes each message with an emoji per severity."""

    # Message template per standard level name; levels not listed here fall
    # back to the numeric form in formatMessage().
    level_formats = {
        "CRITICAL": "❌❌❌ {}: {}",
        "ERROR": "❌ {}: {}",
        "WARNING": "⚠ {}: {}",
        "INFO": "✅ {}: {}",
        "DEBUG": "🐛 {}: {}",
    }

    def format(self, record: logging.LogRecord) -> str:
        """Populate the standard cached record fields, then delegate."""
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        return self.formatMessage(record)

    def formatMessage(self, record: logging.LogRecord) -> str:
        """Render the record using the emoji template for its level."""
        template = self.level_formats.get(record.levelname)
        if template is None:
            # Non-standard level name: include the numeric level instead.
            return "🍆 {} errno {}: {}".format(
                record.name, record.levelno, record.getMessage()
            )
        return template.format(record.name, record.getMessage())
| StarcoderdataPython |
6692460 | <gh_stars>0
# %%
# Experiment: detect distribution shift between US states (CA -> MI) on the
# ACSIncome task, and predict model errors from SHAP values.
from folktables import ACSDataSource, ACSIncome
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import roc_auc_score
import pandas as pd
from collections import defaultdict
from xgboost import XGBRegressor, XGBClassifier
from scipy.stats import kstest
import shap
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
import sys

sys.path.append("../")
from fairtools.xaiUtils import ShapEstimator
import random

random.seed(0)
# %%
# Load data: California is the training domain, Michigan the shifted test domain.
data_source = ACSDataSource(survey_year="2018", horizon="1-Year", survey="person")
ca_data = data_source.get_data(states=["CA"], download=True)
mi_data = data_source.get_data(states=["MI"], download=True)
ca_features, ca_labels, ca_group = ACSIncome.df_to_numpy(ca_data)
mi_features, mi_labels, mi_group = ACSIncome.df_to_numpy(mi_data)
## Conver to DF
ca_features = pd.DataFrame(ca_features, columns=ACSIncome.features)
mi_features = pd.DataFrame(mi_features, columns=ACSIncome.features)
# %%
# Modeling
model = XGBClassifier()
# Train on CA data (cross-validated predictions for in-domain evaluation).
preds_ca = cross_val_predict(model, ca_features, ca_labels, cv=3)
model.fit(ca_features, ca_labels)
# Test on MI data
preds_mi = model.predict(mi_features)
# %%
## Fairness: equal-opportunity gap (TPR difference between groups 1 and 2).
white_tpr = np.mean(preds_ca[(ca_labels == 1) & (ca_group == 1)])
black_tpr = np.mean(preds_ca[(ca_labels == 1) & (ca_group == 2)])
print("Train EO", white_tpr - black_tpr)
white_tpr = np.mean(preds_mi[(mi_labels == 1) & (mi_group == 1)])
black_tpr = np.mean(preds_mi[(mi_labels == 1) & (mi_group == 2)])
print("Test EO", white_tpr - black_tpr)
# %%
## Model performance
print(roc_auc_score(preds_ca, ca_labels))
print(roc_auc_score(preds_mi, mi_labels))
# %%
# Input KS: per-feature Kolmogorov-Smirnov test between CA and MI inputs.
for feat in ca_features.columns:
    pval = kstest(ca_features[feat], mi_features[feat]).pvalue
    if pval < 0.1:
        print(feat, " is distinct ", pval)
    else:
        print(feat, " is equivalent ", pval)
# %%
# %%
# Explainability: SHAP values of the trained model on both domains.
explainer = shap.Explainer(model)
shap_values = explainer(ca_features)
ca_shap = pd.DataFrame(shap_values.values, columns=ca_features.columns)
shap_values = explainer(mi_features)
mi_shap = pd.DataFrame(shap_values.values, columns=ca_features.columns)
# %%
# SHAP KS: same KS test, but on the SHAP value distributions.
for feat in ca_features.columns:
    pval = kstest(ca_shap[feat], mi_shap[feat]).pvalue
    if pval < 0.1:
        print(feat, " is distinct ", pval)
    else:
        print(feat, " is equivalent ", pval)
# %%
## Shap Estimator on CA and MI
se = ShapEstimator(model=XGBRegressor())
shap_pred_ca = cross_val_predict(se, ca_features, ca_labels, cv=3)
shap_pred_ca = pd.DataFrame(shap_pred_ca, columns=ca_features.columns)
shap_pred_ca = shap_pred_ca.add_suffix("_shap")
se.fit(ca_features, ca_labels)
# Per-sample correctness of the main model on CA (the target for G).
error_ca = ca_labels == preds_ca
# %%
# Estimators for the loop: candidate meta-models G that predict errors.
estimators = defaultdict()
estimators["Linear"] = Pipeline(
    [("scaler", StandardScaler()), ("model", LogisticRegression())]
)
estimators["RandomForest"] = RandomForestClassifier(random_state=0)
estimators["XGBoost"] = XGBClassifier(random_state=0)
estimators["MLP"] = MLPClassifier(random_state=0)
# %%
# Loop over different G estimators; for each, compare error prediction from
# SHAP features only, raw data only, and SHAP + data combined.
for estimator in estimators:
    print(estimator)
    clf = estimators[estimator]
    preds_ca_shap = cross_val_predict(
        clf, shap_pred_ca, error_ca, cv=3, method="predict_proba"
    )[:, 1]
    clf.fit(shap_pred_ca, error_ca)
    shap_pred_mi = se.predict(mi_features)
    shap_pred_mi = pd.DataFrame(shap_pred_mi, columns=ca_features.columns)
    shap_pred_mi = shap_pred_mi.add_suffix("_shap")
    error_mi = mi_labels == preds_mi
    preds_mi_shap = clf.predict_proba(shap_pred_mi)[:, 1]
    ## Only SHAP
    print("Only Shap")
    print(roc_auc_score(error_ca, preds_ca_shap))
    print(roc_auc_score(error_mi, preds_mi_shap))
    ## Only data
    print("Only Data")
    preds_ca_shap = cross_val_predict(
        clf, ca_features, error_ca, cv=3, method="predict_proba"
    )[:, 1]
    clf.fit(ca_features, error_ca)
    preds_mi_shap = clf.predict_proba(mi_features)[:, 1]
    print(roc_auc_score(error_ca, preds_ca_shap))
    print(roc_auc_score(error_mi, preds_mi_shap))
    ## SHAP + Data
    print("Shap + Data")
    ca_full = pd.concat([shap_pred_ca, ca_features], axis=1)
    mi_full = pd.concat([shap_pred_mi, mi_features], axis=1)
    preds_ca_shap = cross_val_predict(
        clf, ca_full, error_ca, cv=3, method="predict_proba"
    )[:, 1]
    clf.fit(ca_full, error_ca)
    preds_mi_shap = clf.predict_proba(mi_full)[:, 1]
    print(roc_auc_score(error_ca, preds_ca_shap))
    print(roc_auc_score(error_mi, preds_mi_shap))
# %%
# Original Error
| StarcoderdataPython |
237884 | import pickle
def get_random_pgm_groups():
interval = [0, 1000, 5000, 10000, 50000, 100000]
buckets = {}
for i in range(len(interval)-1):
buckets[i] = []
valid_count = 0
with open('baseline.txt') as f:
lines = f.readlines()
print("Ignore Header Line: {}".format(lines[0]))
lines = lines[1:]
for line in lines:
data = line.split('|')
bm = data[0]
o0_cycle = int(data[1])
o3_cycle = int(data[2])
if o0_cycle == 10000000:
#print("{}".format(bm))
continue
valid_count += 1
for i in range(len(interval)-1):
if o0_cycle >= interval[i] and o0_cycle < interval[i+1]:
buckets[i].append(data)
break
print("Total {} valid programs".format(valid_count))
for i in range(len(interval)-1):
print("Interval {} ~ {}: {}".format(interval[i], interval[i+1],len(buckets[i])))
output = open('random_pgms.pkl', 'wb')
pickle.dump(buckets, output)
output.close()
get_random_pgm_groups()
| StarcoderdataPython |
1638262 | <filename>src/vpack/utils.py
import logging

# Package-wide logger, configured once at import time:
#  - propagate=False keeps records out of the root logger's handlers;
#  - a single StreamHandler at INFO level with a "(name): message" format.
# logging.basicConfig()
logger = logging.getLogger("vpack")
logger.propagate = False
logger.setLevel(logging.INFO)
c_handler = logging.StreamHandler()
c_handler.setLevel(logging.INFO)
c_format = logging.Formatter('(%(name)s): %(message)s')
c_handler.setFormatter(c_format)
logger.addHandler(c_handler)


def get_logger():
    """Return the shared, pre-configured "vpack" logger."""
    return logger
| StarcoderdataPython |
3291338 | <reponame>basnijholt/HASS-data-detective
"""Tests for config package."""
import os
import tempfile
from unittest.mock import patch
import pytest
from detective import config
def test_find_hass_config():
    """Test finding HASS config."""
    # Inside a Hass.io container (HASSIO_TOKEN set) the config dir is fixed.
    with patch.dict(os.environ, {"HASSIO_TOKEN": "yo"}):
        assert config.find_hass_config() == "/config"
    # Otherwise fall back to the default config dir, if it exists.
    with patch.dict(os.environ, {}, clear=True), patch(
        "detective.config.default_hass_config_dir", return_value="default-dir"
    ), patch("os.path.isdir", return_value=True):
        assert config.find_hass_config() == "default-dir"
    # No Hass.io env and no default dir: lookup must fail loudly.
    with patch.dict(os.environ, {}, clear=True), patch(
        "os.path.isdir", return_value=False
    ), pytest.raises(ValueError):
        config.find_hass_config()
def test_load_hass_config():
    """Test loading hass config."""
    # Build a throwaway config dir exercising the custom YAML tags
    # (!secret, !include, !env_var, !include_dir_*).
    with tempfile.TemporaryDirectory() as tmpdir:
        with open(os.path.join(tmpdir, "configuration.yaml"), "wt") as fp:
            fp.write(
                """
mock_secret: !secret some_secret
included: !include included.yaml
mock_env: !env_var MOCK_ENV
mock_env: !env_var MOCK_ENV
mock_dir_list: !include_dir_list ./zxc
mock_dir_merge_list: !include_dir_merge_list ./zxc
mock_dir_named: !include_dir_named ./zxc
mock_dir_merge: !include_dir_merge_named ./zxc
# Trigger duplicate error
mock_secret: !secret other_secret
"""
            )
        with open(os.path.join(tmpdir, "secrets.yaml"), "wt") as fp:
            fp.write(
                """
some_secret: test-some-secret
other_secret: test-other-secret
"""
            )
        with open(os.path.join(tmpdir, "included.yaml"), "wt") as fp:
            fp.write(
                """
some: value
"""
            )
        configuration = config.load_hass_config(tmpdir)
        # assert configuration["mock_secret"] == "test-other-secret"  # TODO: fix
        assert configuration["included"] == {"some": "value"}
def test_db_url_from_hass_config():
    """Test extracting recorder url from config."""
    # No recorder config and no default DB file: must raise.
    with patch("detective.config.load_hass_config", return_value={}), patch(
        "os.path.isfile", return_value=False
    ), pytest.raises(ValueError):
        config.db_url_from_hass_config("mock-path")
    # No recorder config but default SQLite DB present: derive its URL.
    with patch("detective.config.load_hass_config", return_value={}), patch(
        "os.path.isfile", return_value=True
    ):
        assert (
            config.db_url_from_hass_config("mock-path")
            == "sqlite:///mock-path/home-assistant_v2.db"
        )
    # Explicit recorder db_url wins over the default.
    with patch(
        "detective.config.load_hass_config",
        return_value={"recorder": {"db_url": "mock-url"}},
    ):
        assert config.db_url_from_hass_config("mock-path") == "mock-url"
| StarcoderdataPython |
5170473 | <gh_stars>0
import getpass
from selenium.webdriver import Firefox
from time import sleep
def open_browser(browser, url):
    """Point the given webdriver at *url* and return the driver's result."""
    return browser.get(url)
def close_broser(browser):
    """Quit the webdriver session.

    NOTE(review): the name keeps its original typo ("broser") because the
    script's entry point calls it by this name.
    """
    return browser.quit()
def fill_form(browser, form, nome, senha):
    """Fill the name/password fields of the ``.form-<form>`` form and submit.

    The submit button's ``name`` attribute matches the form's own name.
    """
    browser.find_element_by_css_selector(
        f'.form-{form} input[name="nome"]').send_keys(nome)
    browser.find_element_by_css_selector(
        f'.form-{form} input[name="senha"]').send_keys(senha)
    # Give the page a moment before clicking submit.
    sleep(1)
    browser.find_element_by_css_selector(
        f'.form-{form} input[name="{form}"]').click()
def get_header_text(browser):
    """Return the text of the page's ``p span`` header element."""
    header = browser.find_element_by_css_selector('p span')
    return header.text
def form_iterate(browser, data, done):
    """Keep submitting the currently displayed form until *done* appears.

    The header text names the form currently shown; it is re-read on every
    iteration because each submission advances to the next form.
    """
    while done not in get_header_text(browser):
        _form = browser.find_element_by_css_selector('p span').text
        fill_form(browser, _form, **data)
        sleep(1)
def get_it_done(browser, url, data):
    """Open *url*, work through all forms, and return a success banner.

    Returns None (after printing a retry message) if the completion header
    never appeared.
    """
    open_browser(browser, url)
    # Wait for the initial page load.
    sleep(2)
    done = "você conseguiu terminar"
    form_iterate(browser, data, done)
    if done in get_header_text(browser):
        return f'\n{":"*32}\n:: {done.upper()}!!! ::\n{":"*32}\n'
    else:
        print('Houve algum problema, vamos tentar novamente!')
if __name__ == "__main__":
    browser = Firefox()
    url = 'https://selenium.dunossauro.live/exercicio_06.html'
    name = passwd = ''
    # BUG FIX: the original condition `len(name) < 1 > len(passwd)` only
    # looped while BOTH fields were empty, so entering just a name exited
    # the loop with an empty password. Re-prompt until both are non-empty.
    while not (name and passwd):
        name = input('Digite o seu nome: ')
        passwd = getpass.getpass(prompt='Digite a sua senha: ')
    data = {
        'nome': name,
        # BUG FIX: restore the password value; the original line contained a
        # literal "<PASSWORD>" placeholder, which is a syntax error.
        'senha': passwd,
    }
    print(get_it_done(browser, url, data))
    close_broser(browser)
| StarcoderdataPython |
from setuptools import setup, find_packages
from pathlib import Path

# No runtime dependencies for this template package.
requirements = []
__version__ = "0.1.1"

# Use the README as the PyPI long description.
this_dir = Path(__file__).parent
long_description = (this_dir / "README.md").read_text(encoding="utf-8")

setup(
    name="template-python-package",
    version=__version__,
    packages=find_packages(),
    license="MIT",
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/umesh-timalsina/template-python-package",
    install_requires=requirements,
    python_requires=">=3.6, <4",
    include_package_data=True,
    zip_safe=False,
    description="A template python package for testing pypi releases with azure pipelines",
    long_description=long_description,
    long_description_content_type="text/markdown",
)
| StarcoderdataPython |
3543735 | # -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2018, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
import inspect
import json
import pytest
import responses
import ibm_watson.assistant_v1
from ibm_watson.assistant_v1 import *
# Base URL of the (mocked) Assistant v1 service used by every test below.
base_url = "https://gateway.watsonplatform.net/assistant/api"
##############################################################################
# Start of Service: Message
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for message
# -----------------------------------------------------------------------------
class TestMessage:
    """Tests for AssistantV1.message against a mocked HTTP endpoint.

    NOTE(review): relies on module-level helpers defined elsewhere in this
    file (send_request, check_empty_required_params,
    check_missing_required_params, fake_response_MessageResponse_json).
    """

    # --------------------------------------------------------
    # Test 1: Send fake data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_message_response(self):
        body = self.construct_full_body()
        response = fake_response_MessageResponse_json
        send_request(self, body, response)
        assert len(responses.calls) == 1

    # --------------------------------------------------------
    # Test 2: Send only required fake data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_message_required_response(self):
        # Check response with required params
        body = self.construct_required_body()
        response = fake_response_MessageResponse_json
        send_request(self, body, response)
        assert len(responses.calls) == 1

    # --------------------------------------------------------
    # Test 3: Send empty data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_message_empty(self):
        # With missing required params no HTTP call must be made.
        check_empty_required_params(self, fake_response_MessageResponse_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # -----------
    # - Helpers -
    # -----------
    def make_url(self, body):
        # Endpoint URL for the message call of the given workspace.
        endpoint = "/v1/workspaces/{0}/message".format(body["workspace_id"])
        url = "{0}{1}".format(base_url, endpoint)
        return url

    def add_mock_response(self, url, response):
        # Register the canned JSON response with the `responses` mock.
        responses.add(
            responses.POST,
            url,
            body=json.dumps(response),
            status=200,
            content_type="application/json",
        )

    def call_service(self, body):
        service = AssistantV1(
            authenticator=NoAuthAuthenticator(),
            version="2019-02-28",
        )
        service.set_service_url(base_url)
        output = service.message(**body)
        return output

    def construct_full_body(self):
        # Body exercising every optional parameter of message().
        body = {"workspace_id": "string1"}
        body.update(
            {
                "input": MessageInput._from_dict(
                    json.loads("""{"text": "fake_text"}""")
                ),
                "intents": [],
                "entities": [],
                "alternate_intents": True,
                "context": Context._from_dict(
                    json.loads(
                        """{"conversation_id": "fake_conversation_id", "system": {}, "metadata": {"deployment": "fake_deployment", "user_id": "fake_user_id"}}"""
                    )
                ),
                "output": OutputData._from_dict(
                    json.loads(
                        """{"nodes_visited": [], "nodes_visited_details": [], "log_messages": [], "text": [], "generic": []}"""
                    )
                ),
            }
        )
        body["nodes_visited_details"] = True
        return body

    def construct_required_body(self):
        # Minimal body: only the required workspace_id.
        body = {"workspace_id": "string1"}
        return body
# endregion
##############################################################################
# End of Service: Message
##############################################################################
##############################################################################
# Start of Service: Workspaces
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for list_workspaces
# -----------------------------------------------------------------------------
class TestListWorkspaces:
    """Unit tests for ``AssistantV1.list_workspaces`` against a mocked endpoint."""

    @responses.activate
    def test_list_workspaces_response(self):
        # Full request body: every optional parameter populated.
        send_request(self, self.construct_full_body(), fake_response_WorkspaceCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_workspaces_required_response(self):
        # Request body with only the required parameters (none for this op).
        send_request(self, self.construct_required_body(), fake_response_WorkspaceCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_workspaces_empty(self):
        # No required params, so an empty body must still reach the service.
        check_empty_response(self)
        assert len(responses.calls) == 1

    # --- helpers consumed by the shared send_request/check_* drivers ---

    def make_url(self, body):
        return base_url + "/v1/workspaces"

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = AssistantV1(version="2019-02-28", authenticator=NoAuthAuthenticator())
        service.set_service_url(base_url)
        return service.list_workspaces(**body)

    def construct_full_body(self):
        return {
            "page_limit": 12345,
            "sort": "string1",
            "cursor": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        return {}
# -----------------------------------------------------------------------------
# Test Class for create_workspace
# -----------------------------------------------------------------------------
class TestCreateWorkspace:
    """Unit tests for ``AssistantV1.create_workspace`` against a mocked endpoint."""

    @responses.activate
    def test_create_workspace_response(self):
        # Full request body: every optional parameter populated.
        send_request(self, self.construct_full_body(), fake_response_Workspace_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_workspace_required_response(self):
        # Request body with only the required parameters (none for this op).
        send_request(self, self.construct_required_body(), fake_response_Workspace_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_workspace_empty(self):
        # No required params, so an empty body must still reach the service.
        check_empty_response(self)
        assert len(responses.calls) == 1

    # --- helpers consumed by the shared send_request/check_* drivers ---

    def make_url(self, body):
        return base_url + "/v1/workspaces"

    def add_mock_response(self, url, response):
        # Creation returns HTTP 201.
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=201, content_type="application/json")

    def call_service(self, body):
        service = AssistantV1(version="2019-02-28", authenticator=NoAuthAuthenticator())
        service.set_service_url(base_url)
        return service.create_workspace(**body)

    def construct_full_body(self):
        return {
            "name": "string1",
            "description": "string1",
            "language": "string1",
            "metadata": {"mock": "data"},
            "learning_opt_out": True,
            "system_settings": WorkspaceSystemSettings._from_dict(
                json.loads(
                    """{"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "fake_prompt", "none_of_the_above_prompt": "fake_none_of_the_above_prompt", "enabled": false, "sensitivity": "fake_sensitivity", "randomize": false, "max_suggestions": 15, "suggestion_text_policy": "fake_suggestion_text_policy"}, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}"""
                )
            ),
            "intents": [],
            "entities": [],
            "dialog_nodes": [],
            "counterexamples": [],
            "webhooks": [],
            "include_audit": True,
        }

    def construct_required_body(self):
        return {}
# -----------------------------------------------------------------------------
# Test Class for get_workspace
# -----------------------------------------------------------------------------
class TestGetWorkspace:
    """Unit tests for ``AssistantV1.get_workspace`` against a mocked endpoint."""

    @responses.activate
    def test_get_workspace_response(self):
        # Full request body: every optional parameter populated.
        send_request(self, self.construct_full_body(), fake_response_Workspace_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_workspace_required_response(self):
        # Request body with only the required parameters.
        send_request(self, self.construct_required_body(), fake_response_Workspace_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_workspace_empty(self):
        # Missing required params must fail client-side: no HTTP call made.
        check_empty_required_params(self, fake_response_Workspace_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers consumed by the shared send_request/check_* drivers ---

    def make_url(self, body):
        return f"{base_url}/v1/workspaces/{body['workspace_id']}"

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = AssistantV1(version="2019-02-28", authenticator=NoAuthAuthenticator())
        service.set_service_url(base_url)
        return service.get_workspace(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "export": True,
            "include_audit": True,
            "sort": "string1",
        }

    def construct_required_body(self):
        return {"workspace_id": "string1"}
# -----------------------------------------------------------------------------
# Test Class for update_workspace
# -----------------------------------------------------------------------------
class TestUpdateWorkspace:
    """Unit tests for ``AssistantV1.update_workspace`` against a mocked endpoint."""

    @responses.activate
    def test_update_workspace_response(self):
        # Full request body: every optional parameter populated.
        send_request(self, self.construct_full_body(), fake_response_Workspace_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_workspace_required_response(self):
        # Request body with only the required parameters.
        send_request(self, self.construct_required_body(), fake_response_Workspace_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_workspace_empty(self):
        # Missing required params must fail client-side: no HTTP call made.
        check_empty_required_params(self, fake_response_Workspace_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers consumed by the shared send_request/check_* drivers ---

    def make_url(self, body):
        return f"{base_url}/v1/workspaces/{body['workspace_id']}"

    def add_mock_response(self, url, response):
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = AssistantV1(version="2019-02-28", authenticator=NoAuthAuthenticator())
        service.set_service_url(base_url)
        return service.update_workspace(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "name": "string1",
            "description": "string1",
            "language": "string1",
            "metadata": {"mock": "data"},
            "learning_opt_out": True,
            "system_settings": WorkspaceSystemSettings._from_dict(
                json.loads(
                    """{"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "fake_prompt", "none_of_the_above_prompt": "fake_none_of_the_above_prompt", "enabled": false, "sensitivity": "fake_sensitivity", "randomize": false, "max_suggestions": 15, "suggestion_text_policy": "fake_suggestion_text_policy"}, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}"""
                )
            ),
            "intents": [],
            "entities": [],
            "dialog_nodes": [],
            "counterexamples": [],
            "webhooks": [],
            "append": True,
            "include_audit": True,
        }

    def construct_required_body(self):
        return {"workspace_id": "string1"}
# -----------------------------------------------------------------------------
# Test Class for delete_workspace
# -----------------------------------------------------------------------------
class TestDeleteWorkspace:
    """Unit tests for ``AssistantV1.delete_workspace`` against a mocked endpoint."""

    @responses.activate
    def test_delete_workspace_response(self):
        # Full request body (only the path parameter for this op).
        send_request(self, self.construct_full_body(), fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_workspace_required_response(self):
        # Request body with only the required parameters.
        send_request(self, self.construct_required_body(), fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_workspace_empty(self):
        # Missing required params must fail client-side: no HTTP call made.
        check_empty_required_params(self, fake_response__json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers consumed by the shared send_request/check_* drivers ---

    def make_url(self, body):
        return f"{base_url}/v1/workspaces/{body['workspace_id']}"

    def add_mock_response(self, url, response):
        # Delete responses carry no content type.
        responses.add(responses.DELETE, url, body=json.dumps(response),
                      status=200, content_type="")

    def call_service(self, body):
        service = AssistantV1(version="2019-02-28", authenticator=NoAuthAuthenticator())
        service.set_service_url(base_url)
        return service.delete_workspace(**body)

    def construct_full_body(self):
        return {"workspace_id": "string1"}

    def construct_required_body(self):
        return {"workspace_id": "string1"}
# endregion
##############################################################################
# End of Service: Workspaces
##############################################################################
##############################################################################
# Start of Service: Intents
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for list_intents
# -----------------------------------------------------------------------------
class TestListIntents:
    """Unit tests for ``AssistantV1.list_intents`` against a mocked endpoint."""

    @responses.activate
    def test_list_intents_response(self):
        # Full request body: every optional parameter populated.
        send_request(self, self.construct_full_body(), fake_response_IntentCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_intents_required_response(self):
        # Request body with only the required parameters.
        send_request(self, self.construct_required_body(), fake_response_IntentCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_intents_empty(self):
        # Missing required params must fail client-side: no HTTP call made.
        check_empty_required_params(self, fake_response_IntentCollection_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers consumed by the shared send_request/check_* drivers ---

    def make_url(self, body):
        return f"{base_url}/v1/workspaces/{body['workspace_id']}/intents"

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = AssistantV1(version="2019-02-28", authenticator=NoAuthAuthenticator())
        service.set_service_url(base_url)
        return service.list_intents(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "export": True,
            "page_limit": 12345,
            "sort": "string1",
            "cursor": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        return {"workspace_id": "string1"}
# -----------------------------------------------------------------------------
# Test Class for create_intent
# -----------------------------------------------------------------------------
class TestCreateIntent:
    """Unit tests for ``AssistantV1.create_intent`` against a mocked endpoint."""

    @responses.activate
    def test_create_intent_response(self):
        # Full request body: every optional parameter populated.
        send_request(self, self.construct_full_body(), fake_response_Intent_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_intent_required_response(self):
        # Request body with only the required parameters.
        send_request(self, self.construct_required_body(), fake_response_Intent_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_intent_empty(self):
        # Missing required params must fail client-side: no HTTP call made.
        check_empty_required_params(self, fake_response_Intent_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers consumed by the shared send_request/check_* drivers ---

    def make_url(self, body):
        return f"{base_url}/v1/workspaces/{body['workspace_id']}/intents"

    def add_mock_response(self, url, response):
        # Creation returns HTTP 201.
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=201, content_type="application/json")

    def call_service(self, body):
        service = AssistantV1(version="2019-02-28", authenticator=NoAuthAuthenticator())
        service.set_service_url(base_url)
        return service.create_intent(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "intent": "string1",
            "description": "string1",
            "examples": [],
            "include_audit": True,
        }

    def construct_required_body(self):
        return {
            "workspace_id": "string1",
            "intent": "string1",
            "description": "string1",
            "examples": [],
        }
# -----------------------------------------------------------------------------
# Test Class for get_intent
# -----------------------------------------------------------------------------
class TestGetIntent:
    """Unit tests for ``AssistantV1.get_intent`` against a mocked endpoint."""

    @responses.activate
    def test_get_intent_response(self):
        # Full request body: every optional parameter populated.
        send_request(self, self.construct_full_body(), fake_response_Intent_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_intent_required_response(self):
        # Request body with only the required parameters.
        send_request(self, self.construct_required_body(), fake_response_Intent_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_intent_empty(self):
        # Missing required params must fail client-side: no HTTP call made.
        check_empty_required_params(self, fake_response_Intent_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers consumed by the shared send_request/check_* drivers ---

    def make_url(self, body):
        return f"{base_url}/v1/workspaces/{body['workspace_id']}/intents/{body['intent']}"

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = AssistantV1(version="2019-02-28", authenticator=NoAuthAuthenticator())
        service.set_service_url(base_url)
        return service.get_intent(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "intent": "string1",
            "export": True,
            "include_audit": True,
        }

    def construct_required_body(self):
        return {"workspace_id": "string1", "intent": "string1"}
# -----------------------------------------------------------------------------
# Test Class for update_intent
# -----------------------------------------------------------------------------
class TestUpdateIntent:
    """Unit tests for ``AssistantV1.update_intent`` against a mocked endpoint."""

    @responses.activate
    def test_update_intent_response(self):
        # Full request body: every optional parameter populated.
        send_request(self, self.construct_full_body(), fake_response_Intent_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_intent_required_response(self):
        # Request body with only the required parameters.
        send_request(self, self.construct_required_body(), fake_response_Intent_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_intent_empty(self):
        # Missing required params must fail client-side: no HTTP call made.
        check_empty_required_params(self, fake_response_Intent_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers consumed by the shared send_request/check_* drivers ---

    def make_url(self, body):
        return f"{base_url}/v1/workspaces/{body['workspace_id']}/intents/{body['intent']}"

    def add_mock_response(self, url, response):
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = AssistantV1(version="2019-02-28", authenticator=NoAuthAuthenticator())
        service.set_service_url(base_url)
        return service.update_intent(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "intent": "string1",
            "new_intent": "string1",
            "new_description": "string1",
            "new_examples": [],
            "append": True,
            "include_audit": True,
        }

    def construct_required_body(self):
        return {
            "workspace_id": "string1",
            "intent": "string1",
            "new_intent": "string1",
            "new_description": "string1",
            "new_examples": [],
        }
# -----------------------------------------------------------------------------
# Test Class for delete_intent
# -----------------------------------------------------------------------------
class TestDeleteIntent:
    """Unit tests for ``AssistantV1.delete_intent`` against a mocked endpoint."""

    @responses.activate
    def test_delete_intent_response(self):
        # Full request body (only path parameters for this op).
        send_request(self, self.construct_full_body(), fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_intent_required_response(self):
        # Request body with only the required parameters.
        send_request(self, self.construct_required_body(), fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_intent_empty(self):
        # Missing required params must fail client-side: no HTTP call made.
        check_empty_required_params(self, fake_response__json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers consumed by the shared send_request/check_* drivers ---

    def make_url(self, body):
        return f"{base_url}/v1/workspaces/{body['workspace_id']}/intents/{body['intent']}"

    def add_mock_response(self, url, response):
        # Delete responses carry no content type.
        responses.add(responses.DELETE, url, body=json.dumps(response),
                      status=200, content_type="")

    def call_service(self, body):
        service = AssistantV1(version="2019-02-28", authenticator=NoAuthAuthenticator())
        service.set_service_url(base_url)
        return service.delete_intent(**body)

    def construct_full_body(self):
        return {"workspace_id": "string1", "intent": "string1"}

    def construct_required_body(self):
        return {"workspace_id": "string1", "intent": "string1"}
# endregion
##############################################################################
# End of Service: Intents
##############################################################################
##############################################################################
# Start of Service: Examples
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for list_examples
# -----------------------------------------------------------------------------
class TestListExamples:
    """Unit tests for ``AssistantV1.list_examples`` against a mocked endpoint."""

    @responses.activate
    def test_list_examples_response(self):
        # Full request body: every optional parameter populated.
        send_request(self, self.construct_full_body(), fake_response_ExampleCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_examples_required_response(self):
        # Request body with only the required parameters.
        send_request(self, self.construct_required_body(), fake_response_ExampleCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_examples_empty(self):
        # Missing required params must fail client-side: no HTTP call made.
        check_empty_required_params(self, fake_response_ExampleCollection_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers consumed by the shared send_request/check_* drivers ---

    def make_url(self, body):
        return f"{base_url}/v1/workspaces/{body['workspace_id']}/intents/{body['intent']}/examples"

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = AssistantV1(version="2019-02-28", authenticator=NoAuthAuthenticator())
        service.set_service_url(base_url)
        return service.list_examples(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "intent": "string1",
            "page_limit": 12345,
            "sort": "string1",
            "cursor": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        return {"workspace_id": "string1", "intent": "string1"}
# -----------------------------------------------------------------------------
# Test Class for create_example
# -----------------------------------------------------------------------------
class TestCreateExample:
    """Unit tests for ``AssistantV1.create_example`` against a mocked endpoint."""

    @responses.activate
    def test_create_example_response(self):
        # Full request body: every optional parameter populated.
        send_request(self, self.construct_full_body(), fake_response_Example_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_example_required_response(self):
        # Request body with only the required parameters.
        send_request(self, self.construct_required_body(), fake_response_Example_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_example_empty(self):
        # Missing required params must fail client-side: no HTTP call made.
        check_empty_required_params(self, fake_response_Example_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers consumed by the shared send_request/check_* drivers ---

    def make_url(self, body):
        return f"{base_url}/v1/workspaces/{body['workspace_id']}/intents/{body['intent']}/examples"

    def add_mock_response(self, url, response):
        # Creation returns HTTP 201.
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=201, content_type="application/json")

    def call_service(self, body):
        service = AssistantV1(version="2019-02-28", authenticator=NoAuthAuthenticator())
        service.set_service_url(base_url)
        return service.create_example(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "intent": "string1",
            "text": "string1",
            "mentions": [],
            "include_audit": True,
        }

    def construct_required_body(self):
        return {
            "workspace_id": "string1",
            "intent": "string1",
            "text": "string1",
            "mentions": [],
        }
# -----------------------------------------------------------------------------
# Test Class for get_example
# -----------------------------------------------------------------------------
class TestGetExample:
    """Unit tests for ``AssistantV1.get_example`` against a mocked endpoint."""

    @responses.activate
    def test_get_example_response(self):
        # Full request body: every optional parameter populated.
        send_request(self, self.construct_full_body(), fake_response_Example_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_example_required_response(self):
        # Request body with only the required parameters.
        send_request(self, self.construct_required_body(), fake_response_Example_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_example_empty(self):
        # Missing required params must fail client-side: no HTTP call made.
        check_empty_required_params(self, fake_response_Example_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers consumed by the shared send_request/check_* drivers ---

    def make_url(self, body):
        return (
            f"{base_url}/v1/workspaces/{body['workspace_id']}"
            f"/intents/{body['intent']}/examples/{body['text']}"
        )

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = AssistantV1(version="2019-02-28", authenticator=NoAuthAuthenticator())
        service.set_service_url(base_url)
        return service.get_example(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "intent": "string1",
            "text": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        return {
            "workspace_id": "string1",
            "intent": "string1",
            "text": "string1",
        }
# -----------------------------------------------------------------------------
# Test Class for update_example
# -----------------------------------------------------------------------------
class TestUpdateExample:
    """Unit tests for AssistantV1.update_example (POST .../examples/{text})."""

    @responses.activate
    def test_update_example_response(self):
        # Full parameter set -> exactly one mocked HTTP call.
        send_request(self, self.construct_full_body(), fake_response_Example_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_example_required_response(self):
        # Required parameters only -> still exactly one call.
        send_request(self, self.construct_required_body(), fake_response_Example_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_example_empty(self):
        # Empty/missing required params must fail before any HTTP traffic.
        check_empty_required_params(self, fake_response_Example_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ---- helpers used by the shared send_request / check_* utilities ----

    def make_url(self, body):
        return (f"{base_url}/v1/workspaces/{body['workspace_id']}"
                f"/intents/{body['intent']}/examples/{body['text']}")

    def add_mock_response(self, url, response):
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        client = AssistantV1(version="2019-02-28",
                             authenticator=NoAuthAuthenticator())
        client.set_service_url(base_url)
        return client.update_example(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "intent": "string1",
            "text": "string1",
            "new_text": "string1",
            "new_mentions": [],
            "include_audit": True,
        }

    def construct_required_body(self):
        return {
            "workspace_id": "string1",
            "intent": "string1",
            "text": "string1",
            "new_text": "string1",
            "new_mentions": [],
        }
# -----------------------------------------------------------------------------
# Test Class for delete_example
# -----------------------------------------------------------------------------
class TestDeleteExample:
    """Unit tests for AssistantV1.delete_example (DELETE .../examples/{text})."""

    @responses.activate
    def test_delete_example_response(self):
        # Full parameter set -> exactly one mocked HTTP call.
        send_request(self, self.construct_full_body(), fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_example_required_response(self):
        # Required parameters only -> still exactly one call.
        send_request(self, self.construct_required_body(), fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_example_empty(self):
        # Empty/missing required params must fail before any HTTP traffic.
        check_empty_required_params(self, fake_response__json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ---- helpers used by the shared send_request / check_* utilities ----

    def make_url(self, body):
        return (f"{base_url}/v1/workspaces/{body['workspace_id']}"
                f"/intents/{body['intent']}/examples/{body['text']}")

    def add_mock_response(self, url, response):
        # DELETE replies carry an empty content type.
        responses.add(responses.DELETE, url, body=json.dumps(response),
                      status=200, content_type="")

    def call_service(self, body):
        client = AssistantV1(version="2019-02-28",
                             authenticator=NoAuthAuthenticator())
        client.set_service_url(base_url)
        return client.delete_example(**body)

    def construct_full_body(self):
        # delete_example takes no optional parameters beyond the path.
        return {"workspace_id": "string1", "intent": "string1", "text": "string1"}

    def construct_required_body(self):
        return {"workspace_id": "string1", "intent": "string1", "text": "string1"}
# endregion
##############################################################################
# End of Service: Examples
##############################################################################
##############################################################################
# Start of Service: Counterexamples
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for list_counterexamples
# -----------------------------------------------------------------------------
class TestListCounterexamples:
    """Unit tests for AssistantV1.list_counterexamples (GET .../counterexamples)."""

    @responses.activate
    def test_list_counterexamples_response(self):
        # Full parameter set -> exactly one mocked HTTP call.
        send_request(self, self.construct_full_body(),
                     fake_response_CounterexampleCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_counterexamples_required_response(self):
        # Required parameters only -> still exactly one call.
        send_request(self, self.construct_required_body(),
                     fake_response_CounterexampleCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_counterexamples_empty(self):
        # Empty/missing required params must fail before any HTTP traffic.
        check_empty_required_params(
            self, fake_response_CounterexampleCollection_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ---- helpers used by the shared send_request / check_* utilities ----

    def make_url(self, body):
        return f"{base_url}/v1/workspaces/{body['workspace_id']}/counterexamples"

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        client = AssistantV1(version="2019-02-28",
                             authenticator=NoAuthAuthenticator())
        client.set_service_url(base_url)
        return client.list_counterexamples(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "page_limit": 12345,
            "sort": "string1",
            "cursor": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        return {"workspace_id": "string1"}
# -----------------------------------------------------------------------------
# Test Class for create_counterexample
# -----------------------------------------------------------------------------
class TestCreateCounterexample:
    """Unit tests for AssistantV1.create_counterexample (POST .../counterexamples)."""

    @responses.activate
    def test_create_counterexample_response(self):
        # Full parameter set -> exactly one mocked HTTP call.
        send_request(self, self.construct_full_body(),
                     fake_response_Counterexample_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_counterexample_required_response(self):
        # Required parameters only -> still exactly one call.
        send_request(self, self.construct_required_body(),
                     fake_response_Counterexample_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_counterexample_empty(self):
        # Empty/missing required params must fail before any HTTP traffic.
        check_empty_required_params(self, fake_response_Counterexample_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ---- helpers used by the shared send_request / check_* utilities ----

    def make_url(self, body):
        return f"{base_url}/v1/workspaces/{body['workspace_id']}/counterexamples"

    def add_mock_response(self, url, response):
        # Creation replies with 201 Created.
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=201, content_type="application/json")

    def call_service(self, body):
        client = AssistantV1(version="2019-02-28",
                             authenticator=NoAuthAuthenticator())
        client.set_service_url(base_url)
        return client.create_counterexample(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "text": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        return {
            "workspace_id": "string1",
            "text": "string1",
        }
# -----------------------------------------------------------------------------
# Test Class for get_counterexample
# -----------------------------------------------------------------------------
class TestGetCounterexample:
    """Unit tests for AssistantV1.get_counterexample (GET .../counterexamples/{text})."""

    @responses.activate
    def test_get_counterexample_response(self):
        # Full parameter set -> exactly one mocked HTTP call.
        send_request(self, self.construct_full_body(),
                     fake_response_Counterexample_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_counterexample_required_response(self):
        # Required parameters only -> still exactly one call.
        send_request(self, self.construct_required_body(),
                     fake_response_Counterexample_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_counterexample_empty(self):
        # Empty/missing required params must fail before any HTTP traffic.
        check_empty_required_params(self, fake_response_Counterexample_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ---- helpers used by the shared send_request / check_* utilities ----

    def make_url(self, body):
        return (f"{base_url}/v1/workspaces/{body['workspace_id']}"
                f"/counterexamples/{body['text']}")

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        client = AssistantV1(version="2019-02-28",
                             authenticator=NoAuthAuthenticator())
        client.set_service_url(base_url)
        return client.get_counterexample(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "text": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        return {"workspace_id": "string1", "text": "string1"}
# -----------------------------------------------------------------------------
# Test Class for update_counterexample
# -----------------------------------------------------------------------------
class TestUpdateCounterexample:
    """Unit tests for AssistantV1.update_counterexample (POST .../counterexamples/{text})."""

    @responses.activate
    def test_update_counterexample_response(self):
        # Full parameter set -> exactly one mocked HTTP call.
        send_request(self, self.construct_full_body(),
                     fake_response_Counterexample_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_counterexample_required_response(self):
        # Required parameters only -> still exactly one call.
        send_request(self, self.construct_required_body(),
                     fake_response_Counterexample_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_counterexample_empty(self):
        # Empty/missing required params must fail before any HTTP traffic.
        check_empty_required_params(self, fake_response_Counterexample_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ---- helpers used by the shared send_request / check_* utilities ----

    def make_url(self, body):
        return (f"{base_url}/v1/workspaces/{body['workspace_id']}"
                f"/counterexamples/{body['text']}")

    def add_mock_response(self, url, response):
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        client = AssistantV1(version="2019-02-28",
                             authenticator=NoAuthAuthenticator())
        client.set_service_url(base_url)
        return client.update_counterexample(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "text": "string1",
            "new_text": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        return {
            "workspace_id": "string1",
            "text": "string1",
            "new_text": "string1",
        }
# -----------------------------------------------------------------------------
# Test Class for delete_counterexample
# -----------------------------------------------------------------------------
class TestDeleteCounterexample:
    """Unit tests for AssistantV1.delete_counterexample (DELETE .../counterexamples/{text})."""

    @responses.activate
    def test_delete_counterexample_response(self):
        # Full parameter set -> exactly one mocked HTTP call.
        send_request(self, self.construct_full_body(), fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_counterexample_required_response(self):
        # Required parameters only -> still exactly one call.
        send_request(self, self.construct_required_body(), fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_counterexample_empty(self):
        # Empty/missing required params must fail before any HTTP traffic.
        check_empty_required_params(self, fake_response__json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ---- helpers used by the shared send_request / check_* utilities ----

    def make_url(self, body):
        return (f"{base_url}/v1/workspaces/{body['workspace_id']}"
                f"/counterexamples/{body['text']}")

    def add_mock_response(self, url, response):
        # DELETE replies carry an empty content type.
        responses.add(responses.DELETE, url, body=json.dumps(response),
                      status=200, content_type="")

    def call_service(self, body):
        client = AssistantV1(version="2019-02-28",
                             authenticator=NoAuthAuthenticator())
        client.set_service_url(base_url)
        return client.delete_counterexample(**body)

    def construct_full_body(self):
        # delete_counterexample takes no optional parameters beyond the path.
        return {"workspace_id": "string1", "text": "string1"}

    def construct_required_body(self):
        return {"workspace_id": "string1", "text": "string1"}
# endregion
##############################################################################
# End of Service: Counterexamples
##############################################################################
##############################################################################
# Start of Service: Entities
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for list_entities
# -----------------------------------------------------------------------------
class TestListEntities:
    """Unit tests for AssistantV1.list_entities (GET .../entities)."""

    @responses.activate
    def test_list_entities_response(self):
        # Full parameter set -> exactly one mocked HTTP call.
        send_request(self, self.construct_full_body(),
                     fake_response_EntityCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_entities_required_response(self):
        # Required parameters only -> still exactly one call.
        send_request(self, self.construct_required_body(),
                     fake_response_EntityCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_entities_empty(self):
        # Empty/missing required params must fail before any HTTP traffic.
        check_empty_required_params(self, fake_response_EntityCollection_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ---- helpers used by the shared send_request / check_* utilities ----

    def make_url(self, body):
        return f"{base_url}/v1/workspaces/{body['workspace_id']}/entities"

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        client = AssistantV1(version="2019-02-28",
                             authenticator=NoAuthAuthenticator())
        client.set_service_url(base_url)
        return client.list_entities(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "export": True,
            "page_limit": 12345,
            "sort": "string1",
            "cursor": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        return {"workspace_id": "string1"}
# -----------------------------------------------------------------------------
# Test Class for create_entity
# -----------------------------------------------------------------------------
class TestCreateEntity:
    """Unit tests for AssistantV1.create_entity (POST .../entities)."""

    @responses.activate
    def test_create_entity_response(self):
        # Full parameter set -> exactly one mocked HTTP call.
        send_request(self, self.construct_full_body(), fake_response_Entity_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_entity_required_response(self):
        # Required parameters only -> still exactly one call.
        send_request(self, self.construct_required_body(), fake_response_Entity_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_entity_empty(self):
        # Empty/missing required params must fail before any HTTP traffic.
        check_empty_required_params(self, fake_response_Entity_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ---- helpers used by the shared send_request / check_* utilities ----

    def make_url(self, body):
        return f"{base_url}/v1/workspaces/{body['workspace_id']}/entities"

    def add_mock_response(self, url, response):
        # Creation replies with 201 Created.
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=201, content_type="application/json")

    def call_service(self, body):
        client = AssistantV1(version="2019-02-28",
                             authenticator=NoAuthAuthenticator())
        client.set_service_url(base_url)
        return client.create_entity(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "description": "string1",
            "metadata": {"mock": "data"},
            "fuzzy_match": True,
            "values": [],
            "include_audit": True,
        }

    def construct_required_body(self):
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "description": "string1",
            "metadata": {"mock": "data"},
            "fuzzy_match": True,
            "values": [],
        }
# -----------------------------------------------------------------------------
# Test Class for get_entity
# -----------------------------------------------------------------------------
class TestGetEntity:
    """Unit tests for AssistantV1.get_entity (GET .../entities/{entity})."""

    @responses.activate
    def test_get_entity_response(self):
        # Full parameter set -> exactly one mocked HTTP call.
        send_request(self, self.construct_full_body(), fake_response_Entity_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_entity_required_response(self):
        # Required parameters only -> still exactly one call.
        send_request(self, self.construct_required_body(), fake_response_Entity_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_entity_empty(self):
        # Empty/missing required params must fail before any HTTP traffic.
        check_empty_required_params(self, fake_response_Entity_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ---- helpers used by the shared send_request / check_* utilities ----

    def make_url(self, body):
        return (f"{base_url}/v1/workspaces/{body['workspace_id']}"
                f"/entities/{body['entity']}")

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        client = AssistantV1(version="2019-02-28",
                             authenticator=NoAuthAuthenticator())
        client.set_service_url(base_url)
        return client.get_entity(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "export": True,
            "include_audit": True,
        }

    def construct_required_body(self):
        return {"workspace_id": "string1", "entity": "string1"}
# -----------------------------------------------------------------------------
# Test Class for update_entity
# -----------------------------------------------------------------------------
class TestUpdateEntity:
    """Unit tests for AssistantV1.update_entity (POST .../entities/{entity})."""

    @responses.activate
    def test_update_entity_response(self):
        # Full parameter set -> exactly one mocked HTTP call.
        send_request(self, self.construct_full_body(), fake_response_Entity_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_entity_required_response(self):
        # Required parameters only -> still exactly one call.
        send_request(self, self.construct_required_body(), fake_response_Entity_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_entity_empty(self):
        # Empty/missing required params must fail before any HTTP traffic.
        check_empty_required_params(self, fake_response_Entity_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ---- helpers used by the shared send_request / check_* utilities ----

    def make_url(self, body):
        return (f"{base_url}/v1/workspaces/{body['workspace_id']}"
                f"/entities/{body['entity']}")

    def add_mock_response(self, url, response):
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        client = AssistantV1(version="2019-02-28",
                             authenticator=NoAuthAuthenticator())
        client.set_service_url(base_url)
        return client.update_entity(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "new_entity": "string1",
            "new_description": "string1",
            "new_metadata": {"mock": "data"},
            "new_fuzzy_match": True,
            "new_values": [],
            "append": True,
            "include_audit": True,
        }

    def construct_required_body(self):
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "new_entity": "string1",
            "new_description": "string1",
            "new_metadata": {"mock": "data"},
            "new_fuzzy_match": True,
            "new_values": [],
        }
# -----------------------------------------------------------------------------
# Test Class for delete_entity
# -----------------------------------------------------------------------------
class TestDeleteEntity:
    """Unit tests for AssistantV1.delete_entity (DELETE .../entities/{entity})."""

    @responses.activate
    def test_delete_entity_response(self):
        # Full parameter set -> exactly one mocked HTTP call.
        send_request(self, self.construct_full_body(), fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_entity_required_response(self):
        # Required parameters only -> still exactly one call.
        send_request(self, self.construct_required_body(), fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_entity_empty(self):
        # Empty/missing required params must fail before any HTTP traffic.
        check_empty_required_params(self, fake_response__json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ---- helpers used by the shared send_request / check_* utilities ----

    def make_url(self, body):
        return (f"{base_url}/v1/workspaces/{body['workspace_id']}"
                f"/entities/{body['entity']}")

    def add_mock_response(self, url, response):
        # DELETE replies carry an empty content type.
        responses.add(responses.DELETE, url, body=json.dumps(response),
                      status=200, content_type="")

    def call_service(self, body):
        client = AssistantV1(version="2019-02-28",
                             authenticator=NoAuthAuthenticator())
        client.set_service_url(base_url)
        return client.delete_entity(**body)

    def construct_full_body(self):
        # delete_entity takes no optional parameters beyond the path.
        return {"workspace_id": "string1", "entity": "string1"}

    def construct_required_body(self):
        return {"workspace_id": "string1", "entity": "string1"}
# endregion
##############################################################################
# End of Service: Entities
##############################################################################
##############################################################################
# Start of Service: Mentions
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for list_mentions
# -----------------------------------------------------------------------------
class TestListMentions:
    """Unit tests for AssistantV1.list_mentions (GET .../entities/{entity}/mentions)."""

    @responses.activate
    def test_list_mentions_response(self):
        # Full parameter set -> exactly one mocked HTTP call.
        send_request(self, self.construct_full_body(),
                     fake_response_EntityMentionCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_mentions_required_response(self):
        # Required parameters only -> still exactly one call.
        send_request(self, self.construct_required_body(),
                     fake_response_EntityMentionCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_mentions_empty(self):
        # Empty/missing required params must fail before any HTTP traffic.
        check_empty_required_params(
            self, fake_response_EntityMentionCollection_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ---- helpers used by the shared send_request / check_* utilities ----

    def make_url(self, body):
        return (f"{base_url}/v1/workspaces/{body['workspace_id']}"
                f"/entities/{body['entity']}/mentions")

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        client = AssistantV1(version="2019-02-28",
                             authenticator=NoAuthAuthenticator())
        client.set_service_url(base_url)
        return client.list_mentions(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "export": True,
            "include_audit": True,
        }

    def construct_required_body(self):
        return {"workspace_id": "string1", "entity": "string1"}
# endregion
##############################################################################
# End of Service: Mentions
##############################################################################
##############################################################################
# Start of Service: Values
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for list_values
# -----------------------------------------------------------------------------
class TestListValues:
    """Unit tests for AssistantV1.list_values (GET .../entities/{entity}/values)."""

    @responses.activate
    def test_list_values_response(self):
        # Full parameter set -> exactly one mocked HTTP call.
        send_request(self, self.construct_full_body(),
                     fake_response_ValueCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_values_required_response(self):
        # Required parameters only -> still exactly one call.
        send_request(self, self.construct_required_body(),
                     fake_response_ValueCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_values_empty(self):
        # Empty/missing required params must fail before any HTTP traffic.
        check_empty_required_params(self, fake_response_ValueCollection_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # ---- helpers used by the shared send_request / check_* utilities ----

    def make_url(self, body):
        return (f"{base_url}/v1/workspaces/{body['workspace_id']}"
                f"/entities/{body['entity']}/values")

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        client = AssistantV1(version="2019-02-28",
                             authenticator=NoAuthAuthenticator())
        client.set_service_url(base_url)
        return client.list_values(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "export": True,
            "page_limit": 12345,
            "sort": "string1",
            "cursor": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        return {"workspace_id": "string1", "entity": "string1"}
# -----------------------------------------------------------------------------
# Test Class for create_value
# -----------------------------------------------------------------------------
class TestCreateValue:
    """Unit tests for AssistantV1.create_value against a mocked endpoint."""

    @responses.activate
    def test_create_value_response(self):
        # Full parameter set must result in exactly one HTTP call.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_Value_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_value_required_response(self):
        # Required-only parameters must also succeed with a single call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_Value_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_value_empty(self):
        # Empty/missing required params must fail before any request is sent.
        check_empty_required_params(self, fake_response_Value_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # Helpers consumed by the shared send_request/check_* utilities.

    def make_url(self, body):
        # Build the values-collection path from the request body.
        endpoint = "/v1/workspaces/{0}/entities/{1}/values".format(
            body["workspace_id"], body["entity"]
        )
        return base_url + endpoint

    def add_mock_response(self, url, response):
        # Register a canned POST response (201 Created) for the URL.
        responses.add(
            responses.POST,
            url,
            status=201,
            body=json.dumps(response),
            content_type="application/json",
        )

    def call_service(self, body):
        # Instantiate an unauthenticated client and invoke the operation.
        client = AssistantV1(
            version="2019-02-28",
            authenticator=NoAuthAuthenticator(),
        )
        client.set_service_url(base_url)
        return client.create_value(**body)

    def construct_full_body(self):
        # Path params, request body fields, and the optional audit flag.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
            "metadata": {"mock": "data"},
            "type": "string1",
            "synonyms": [],
            "patterns": [],
            "include_audit": True,
        }

    def construct_required_body(self):
        # Path params plus the request body fields, no optional flags.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
            "metadata": {"mock": "data"},
            "type": "string1",
            "synonyms": [],
            "patterns": [],
        }
# -----------------------------------------------------------------------------
# Test Class for get_value
# -----------------------------------------------------------------------------
class TestGetValue:
    """Unit tests for AssistantV1.get_value against a mocked endpoint."""

    @responses.activate
    def test_get_value_response(self):
        # Full parameter set must result in exactly one HTTP call.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_Value_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_value_required_response(self):
        # Required-only parameters must also succeed with a single call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_Value_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_value_empty(self):
        # Empty/missing required params must fail before any request is sent.
        check_empty_required_params(self, fake_response_Value_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # Helpers consumed by the shared send_request/check_* utilities.

    def make_url(self, body):
        # Build the single-value path from the request body.
        endpoint = "/v1/workspaces/{0}/entities/{1}/values/{2}".format(
            body["workspace_id"], body["entity"], body["value"]
        )
        return base_url + endpoint

    def add_mock_response(self, url, response):
        # Register a canned GET response for the constructed URL.
        responses.add(
            responses.GET,
            url,
            status=200,
            body=json.dumps(response),
            content_type="application/json",
        )

    def call_service(self, body):
        # Instantiate an unauthenticated client and invoke the operation.
        client = AssistantV1(
            version="2019-02-28",
            authenticator=NoAuthAuthenticator(),
        )
        client.set_service_url(base_url)
        return client.get_value(**body)

    def construct_full_body(self):
        # Path params plus every optional query parameter.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
            "export": True,
            "include_audit": True,
        }

    def construct_required_body(self):
        # Path parameters only.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
        }
# -----------------------------------------------------------------------------
# Test Class for update_value
# -----------------------------------------------------------------------------
class TestUpdateValue:
    """Unit tests for AssistantV1.update_value against a mocked endpoint."""

    @responses.activate
    def test_update_value_response(self):
        # Full parameter set must result in exactly one HTTP call.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_Value_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_value_required_response(self):
        # Required-only parameters must also succeed with a single call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_Value_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_value_empty(self):
        # Empty/missing required params must fail before any request is sent.
        check_empty_required_params(self, fake_response_Value_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # Helpers consumed by the shared send_request/check_* utilities.

    def make_url(self, body):
        # Build the single-value path from the request body.
        endpoint = "/v1/workspaces/{0}/entities/{1}/values/{2}".format(
            body["workspace_id"], body["entity"], body["value"]
        )
        return base_url + endpoint

    def add_mock_response(self, url, response):
        # Updates are POSTed; register a canned 200 response.
        responses.add(
            responses.POST,
            url,
            status=200,
            body=json.dumps(response),
            content_type="application/json",
        )

    def call_service(self, body):
        # Instantiate an unauthenticated client and invoke the operation.
        client = AssistantV1(
            version="2019-02-28",
            authenticator=NoAuthAuthenticator(),
        )
        client.set_service_url(base_url)
        return client.update_value(**body)

    def construct_full_body(self):
        # Path params, all new_* replacement fields, and optional flags.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
            "new_value": "string1",
            "new_metadata": {"mock": "data"},
            "new_type": "string1",
            "new_synonyms": [],
            "new_patterns": [],
            "append": True,
            "include_audit": True,
        }

    def construct_required_body(self):
        # Path params plus the new_* replacement fields only.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
            "new_value": "string1",
            "new_metadata": {"mock": "data"},
            "new_type": "string1",
            "new_synonyms": [],
            "new_patterns": [],
        }
# -----------------------------------------------------------------------------
# Test Class for delete_value
# -----------------------------------------------------------------------------
class TestDeleteValue:
    """Unit tests for AssistantV1.delete_value against a mocked endpoint."""

    @responses.activate
    def test_delete_value_response(self):
        # Full parameter set must result in exactly one HTTP call.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_value_required_response(self):
        # Required-only parameters must also succeed with a single call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_value_empty(self):
        # Empty/missing required params must fail before any request is sent.
        check_empty_required_params(self, fake_response__json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # Helpers consumed by the shared send_request/check_* utilities.

    def make_url(self, body):
        # Build the single-value path from the request body.
        endpoint = "/v1/workspaces/{0}/entities/{1}/values/{2}".format(
            body["workspace_id"], body["entity"], body["value"]
        )
        return base_url + endpoint

    def add_mock_response(self, url, response):
        # DELETE returns an empty-typed body; register a canned 200.
        responses.add(
            responses.DELETE,
            url,
            status=200,
            body=json.dumps(response),
            content_type="",
        )

    def call_service(self, body):
        # Instantiate an unauthenticated client and invoke the operation.
        client = AssistantV1(
            version="2019-02-28",
            authenticator=NoAuthAuthenticator(),
        )
        client.set_service_url(base_url)
        return client.delete_value(**body)

    def construct_full_body(self):
        # delete_value takes only path parameters.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
        }

    def construct_required_body(self):
        # Identical to the full body: no optional parameters exist.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
        }
# endregion
##############################################################################
# End of Service: Values
##############################################################################
##############################################################################
# Start of Service: Synonyms
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for list_synonyms
# -----------------------------------------------------------------------------
class TestListSynonyms:
    """Unit tests for AssistantV1.list_synonyms against a mocked endpoint."""

    @responses.activate
    def test_list_synonyms_response(self):
        # Full parameter set must result in exactly one HTTP call.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_SynonymCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_synonyms_required_response(self):
        # Required-only parameters must also succeed with a single call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_SynonymCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_synonyms_empty(self):
        # Empty/missing required params must fail before any request is sent.
        check_empty_required_params(self, fake_response_SynonymCollection_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # Helpers consumed by the shared send_request/check_* utilities.

    def make_url(self, body):
        # Build the synonyms-collection path from the request body.
        endpoint = "/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms".format(
            body["workspace_id"], body["entity"], body["value"]
        )
        return base_url + endpoint

    def add_mock_response(self, url, response):
        # Register a canned GET response for the constructed URL.
        responses.add(
            responses.GET,
            url,
            status=200,
            body=json.dumps(response),
            content_type="application/json",
        )

    def call_service(self, body):
        # Instantiate an unauthenticated client and invoke the operation.
        client = AssistantV1(
            version="2019-02-28",
            authenticator=NoAuthAuthenticator(),
        )
        client.set_service_url(base_url)
        return client.list_synonyms(**body)

    def construct_full_body(self):
        # Path params plus every optional query parameter.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
            "page_limit": 12345,
            "sort": "string1",
            "cursor": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        # Path parameters only.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
        }
# -----------------------------------------------------------------------------
# Test Class for create_synonym
# -----------------------------------------------------------------------------
class TestCreateSynonym:
    """Unit tests for AssistantV1.create_synonym against a mocked endpoint."""

    @responses.activate
    def test_create_synonym_response(self):
        # Full parameter set must result in exactly one HTTP call.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_Synonym_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_synonym_required_response(self):
        # Required-only parameters must also succeed with a single call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_Synonym_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_synonym_empty(self):
        # Empty/missing required params must fail before any request is sent.
        check_empty_required_params(self, fake_response_Synonym_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # Helpers consumed by the shared send_request/check_* utilities.

    def make_url(self, body):
        # Build the synonyms-collection path from the request body.
        endpoint = "/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms".format(
            body["workspace_id"], body["entity"], body["value"]
        )
        return base_url + endpoint

    def add_mock_response(self, url, response):
        # Register a canned POST response (201 Created) for the URL.
        responses.add(
            responses.POST,
            url,
            status=201,
            body=json.dumps(response),
            content_type="application/json",
        )

    def call_service(self, body):
        # Instantiate an unauthenticated client and invoke the operation.
        client = AssistantV1(
            version="2019-02-28",
            authenticator=NoAuthAuthenticator(),
        )
        client.set_service_url(base_url)
        return client.create_synonym(**body)

    def construct_full_body(self):
        # Path params, the synonym body field, and the optional audit flag.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
            "synonym": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        # Path params plus the synonym body field only.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
            "synonym": "string1",
        }
# -----------------------------------------------------------------------------
# Test Class for get_synonym
# -----------------------------------------------------------------------------
class TestGetSynonym:
    """Unit tests for AssistantV1.get_synonym against a mocked endpoint."""

    @responses.activate
    def test_get_synonym_response(self):
        # Full parameter set must result in exactly one HTTP call.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_Synonym_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_synonym_required_response(self):
        # Required-only parameters must also succeed with a single call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_Synonym_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_synonym_empty(self):
        # Empty/missing required params must fail before any request is sent.
        check_empty_required_params(self, fake_response_Synonym_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # Helpers consumed by the shared send_request/check_* utilities.

    def make_url(self, body):
        # Build the single-synonym path from the request body.
        endpoint = "/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}".format(
            body["workspace_id"], body["entity"], body["value"], body["synonym"]
        )
        return base_url + endpoint

    def add_mock_response(self, url, response):
        # Register a canned GET response for the constructed URL.
        responses.add(
            responses.GET,
            url,
            status=200,
            body=json.dumps(response),
            content_type="application/json",
        )

    def call_service(self, body):
        # Instantiate an unauthenticated client and invoke the operation.
        client = AssistantV1(
            version="2019-02-28",
            authenticator=NoAuthAuthenticator(),
        )
        client.set_service_url(base_url)
        return client.get_synonym(**body)

    def construct_full_body(self):
        # Path params plus the optional audit flag.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
            "synonym": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        # Path parameters only.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
            "synonym": "string1",
        }
# -----------------------------------------------------------------------------
# Test Class for update_synonym
# -----------------------------------------------------------------------------
class TestUpdateSynonym:
    """Unit tests for AssistantV1.update_synonym against a mocked endpoint."""

    @responses.activate
    def test_update_synonym_response(self):
        # Full parameter set must result in exactly one HTTP call.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_Synonym_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_synonym_required_response(self):
        # Required-only parameters must also succeed with a single call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_Synonym_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_synonym_empty(self):
        # Empty/missing required params must fail before any request is sent.
        check_empty_required_params(self, fake_response_Synonym_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # Helpers consumed by the shared send_request/check_* utilities.

    def make_url(self, body):
        # Build the single-synonym path from the request body.
        endpoint = "/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}".format(
            body["workspace_id"], body["entity"], body["value"], body["synonym"]
        )
        return base_url + endpoint

    def add_mock_response(self, url, response):
        # Updates are POSTed; register a canned 200 response.
        responses.add(
            responses.POST,
            url,
            status=200,
            body=json.dumps(response),
            content_type="application/json",
        )

    def call_service(self, body):
        # Instantiate an unauthenticated client and invoke the operation.
        client = AssistantV1(
            version="2019-02-28",
            authenticator=NoAuthAuthenticator(),
        )
        client.set_service_url(base_url)
        return client.update_synonym(**body)

    def construct_full_body(self):
        # Path params, the new_synonym replacement, and the audit flag.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
            "synonym": "string1",
            "new_synonym": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        # Path params plus the new_synonym replacement only.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
            "synonym": "string1",
            "new_synonym": "string1",
        }
# -----------------------------------------------------------------------------
# Test Class for delete_synonym
# -----------------------------------------------------------------------------
class TestDeleteSynonym:
    """Unit tests for AssistantV1.delete_synonym against a mocked endpoint."""

    @responses.activate
    def test_delete_synonym_response(self):
        # Full parameter set must result in exactly one HTTP call.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_synonym_required_response(self):
        # Required-only parameters must also succeed with a single call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response__json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_synonym_empty(self):
        # Empty/missing required params must fail before any request is sent.
        check_empty_required_params(self, fake_response__json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # Helpers consumed by the shared send_request/check_* utilities.

    def make_url(self, body):
        # Build the single-synonym path from the request body.
        endpoint = "/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}".format(
            body["workspace_id"], body["entity"], body["value"], body["synonym"]
        )
        return base_url + endpoint

    def add_mock_response(self, url, response):
        # DELETE returns an empty-typed body; register a canned 200.
        responses.add(
            responses.DELETE,
            url,
            status=200,
            body=json.dumps(response),
            content_type="",
        )

    def call_service(self, body):
        # Instantiate an unauthenticated client and invoke the operation.
        client = AssistantV1(
            version="2019-02-28",
            authenticator=NoAuthAuthenticator(),
        )
        client.set_service_url(base_url)
        return client.delete_synonym(**body)

    def construct_full_body(self):
        # delete_synonym takes only path parameters.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
            "synonym": "string1",
        }

    def construct_required_body(self):
        # Identical to the full body: no optional parameters exist.
        return {
            "workspace_id": "string1",
            "entity": "string1",
            "value": "string1",
            "synonym": "string1",
        }
# endregion
##############################################################################
# End of Service: Synonyms
##############################################################################
##############################################################################
# Start of Service: DialogNodes
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for list_dialog_nodes
# -----------------------------------------------------------------------------
class TestListDialogNodes:
    """Unit tests for AssistantV1.list_dialog_nodes against a mocked endpoint."""

    @responses.activate
    def test_list_dialog_nodes_response(self):
        # Full parameter set must result in exactly one HTTP call.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_DialogNodeCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_dialog_nodes_required_response(self):
        # Required-only parameters must also succeed with a single call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_DialogNodeCollection_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_dialog_nodes_empty(self):
        # Empty/missing required params must fail before any request is sent.
        check_empty_required_params(
            self, fake_response_DialogNodeCollection_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # Helpers consumed by the shared send_request/check_* utilities.

    def make_url(self, body):
        # Build the dialog-nodes collection path from the request body.
        endpoint = "/v1/workspaces/{0}/dialog_nodes".format(
            body["workspace_id"])
        return base_url + endpoint

    def add_mock_response(self, url, response):
        # Register a canned GET response for the constructed URL.
        responses.add(
            responses.GET,
            url,
            status=200,
            body=json.dumps(response),
            content_type="application/json",
        )

    def call_service(self, body):
        # Instantiate an unauthenticated client and invoke the operation.
        client = AssistantV1(
            version="2019-02-28",
            authenticator=NoAuthAuthenticator(),
        )
        client.set_service_url(base_url)
        return client.list_dialog_nodes(**body)

    def construct_full_body(self):
        # Path param plus every optional query parameter.
        return {
            "workspace_id": "string1",
            "page_limit": 12345,
            "sort": "string1",
            "cursor": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        # Path parameter only.
        return {"workspace_id": "string1"}
# -----------------------------------------------------------------------------
# Test Class for create_dialog_node
# -----------------------------------------------------------------------------
class TestCreateDialogNode:
    """Unit tests for AssistantV1.create_dialog_node against a mocked endpoint."""

    @responses.activate
    def test_create_dialog_node_response(self):
        # Full parameter set must result in exactly one HTTP call.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_DialogNode_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_dialog_node_required_response(self):
        # Required-only parameters must also succeed with a single call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_DialogNode_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_dialog_node_empty(self):
        # Empty/missing required params must fail before any request is sent.
        check_empty_required_params(self, fake_response_DialogNode_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # Helpers consumed by the shared send_request/check_* utilities.

    def make_url(self, body):
        # Build the dialog-nodes collection path from the request body.
        endpoint = "/v1/workspaces/{0}/dialog_nodes".format(
            body["workspace_id"])
        return base_url + endpoint

    def add_mock_response(self, url, response):
        # Register a canned POST response (201 Created) for the URL.
        responses.add(
            responses.POST,
            url,
            status=201,
            body=json.dumps(response),
            content_type="application/json",
        )

    def call_service(self, body):
        # Instantiate an unauthenticated client and invoke the operation.
        client = AssistantV1(
            version="2019-02-28",
            authenticator=NoAuthAuthenticator(),
        )
        client.set_service_url(base_url)
        return client.create_dialog_node(**body)

    def _dialog_node_fields(self):
        # Shared request-body fields for a dialog node, including mock
        # model objects deserialized from embedded JSON.
        mock_output = DialogNodeOutput._from_dict(
            json.loads(
                """{"generic": [], "modifiers": {"overwrite": false}}""")
        )
        mock_next_step = DialogNodeNextStep._from_dict(
            json.loads(
                """{"behavior": "fake_behavior", "dialog_node": "fake_dialog_node", "selector": "fake_selector"}"""
            )
        )
        return {
            "dialog_node": "string1",
            "description": "string1",
            "conditions": "string1",
            "parent": "string1",
            "previous_sibling": "string1",
            "output": mock_output,
            "context": {"mock": "data"},
            "metadata": {"mock": "data"},
            "next_step": mock_next_step,
            "title": "string1",
            "type": "string1",
            "event_name": "string1",
            "variable": "string1",
            "actions": [],
            "digress_in": "string1",
            "digress_out": "string1",
            "digress_out_slots": "string1",
            "user_label": "string1",
            "disambiguation_opt_out": True,
        }

    def construct_full_body(self):
        # Path param, request body fields, and the optional audit flag.
        body = {"workspace_id": "string1"}
        body.update(self._dialog_node_fields())
        body["include_audit"] = True
        return body

    def construct_required_body(self):
        # Path param plus the request body fields, no optional flags.
        body = {"workspace_id": "string1"}
        body.update(self._dialog_node_fields())
        return body
# -----------------------------------------------------------------------------
# Test Class for get_dialog_node
# -----------------------------------------------------------------------------
class TestGetDialogNode:
    """Unit tests for AssistantV1.get_dialog_node against a mocked endpoint."""

    @responses.activate
    def test_get_dialog_node_response(self):
        # Full parameter set must result in exactly one HTTP call.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_DialogNode_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_dialog_node_required_response(self):
        # Required-only parameters must also succeed with a single call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_DialogNode_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_dialog_node_empty(self):
        # Empty/missing required params must fail before any request is sent.
        check_empty_required_params(self, fake_response_DialogNode_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # Helpers consumed by the shared send_request/check_* utilities.

    def make_url(self, body):
        # Build the single dialog-node path from the request body.
        endpoint = "/v1/workspaces/{0}/dialog_nodes/{1}".format(
            body["workspace_id"], body["dialog_node"]
        )
        return base_url + endpoint

    def add_mock_response(self, url, response):
        # Register a canned GET response for the constructed URL.
        responses.add(
            responses.GET,
            url,
            status=200,
            body=json.dumps(response),
            content_type="application/json",
        )

    def call_service(self, body):
        # Instantiate an unauthenticated client and invoke the operation.
        client = AssistantV1(
            version="2019-02-28",
            authenticator=NoAuthAuthenticator(),
        )
        client.set_service_url(base_url)
        return client.get_dialog_node(**body)

    def construct_full_body(self):
        # Path params plus the optional audit flag.
        return {
            "workspace_id": "string1",
            "dialog_node": "string1",
            "include_audit": True,
        }

    def construct_required_body(self):
        # Path parameters only.
        return {"workspace_id": "string1", "dialog_node": "string1"}
# -----------------------------------------------------------------------------
# Test Class for update_dialog_node
# -----------------------------------------------------------------------------
class TestUpdateDialogNode:
    """Generated tests for AssistantV1.update_dialog_node."""

    # --------------------------------------------------------
    # Test 1: Send fake data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_update_dialog_node_response(self):
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_DialogNode_json)
        assert len(responses.calls) == 1

    # --------------------------------------------------------
    # Test 2: Send only required fake data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_update_dialog_node_required_response(self):
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_DialogNode_json)
        assert len(responses.calls) == 1

    # --------------------------------------------------------
    # Test 3: Send empty data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_update_dialog_node_empty(self):
        check_empty_required_params(self, fake_response_DialogNode_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # -----------
    # - Helpers -
    # -----------
    def make_url(self, body):
        """Build the endpoint URL from the path parameters in *body*."""
        endpoint = "/v1/workspaces/{0}/dialog_nodes/{1}".format(
            body["workspace_id"], body["dialog_node"]
        )
        return "{0}{1}".format(base_url, endpoint)

    def add_mock_response(self, url, response):
        """Register *response* as the mock POST reply for *url*."""
        responses.add(
            responses.POST,
            url,
            body=json.dumps(response),
            status=200,
            content_type="application/json",
        )

    def call_service(self, body):
        """Invoke update_dialog_node against the mocked service."""
        client = AssistantV1(
            authenticator=NoAuthAuthenticator(),
            version="2019-02-28",
        )
        client.set_service_url(base_url)
        return client.update_dialog_node(**body)

    def _new_node_fields(self):
        # Optional "new_*" fields shared by both body constructors.
        return {
            "new_dialog_node": "string1",
            "new_description": "string1",
            "new_conditions": "string1",
            "new_parent": "string1",
            "new_previous_sibling": "string1",
            "new_output": DialogNodeOutput._from_dict(
                json.loads(
                    """{"generic": [], "modifiers": {"overwrite": false}}""")
            ),
            "new_context": {"mock": "data"},
            "new_metadata": {"mock": "data"},
            "new_next_step": DialogNodeNextStep._from_dict(
                json.loads(
                    """{"behavior": "fake_behavior", "dialog_node": "fake_dialog_node", "selector": "fake_selector"}"""
                )
            ),
            "new_title": "string1",
            "new_type": "string1",
            "new_event_name": "string1",
            "new_variable": "string1",
            "new_actions": [],
            "new_digress_in": "string1",
            "new_digress_out": "string1",
            "new_digress_out_slots": "string1",
            "new_user_label": "string1",
            "new_disambiguation_opt_out": True,
        }

    def construct_full_body(self):
        body = {"workspace_id": "string1", "dialog_node": "string1"}
        body.update(self._new_node_fields())
        body["include_audit"] = True
        return body

    def construct_required_body(self):
        body = {"workspace_id": "string1", "dialog_node": "string1"}
        body.update(self._new_node_fields())
        return body
# -----------------------------------------------------------------------------
# Test Class for delete_dialog_node
# -----------------------------------------------------------------------------
class TestDeleteDialogNode:
    """Generated tests for AssistantV1.delete_dialog_node."""

    # --------------------------------------------------------
    # Test 1: Send fake data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_delete_dialog_node_response(self):
        payload = self.construct_full_body()
        send_request(self, payload, fake_response__json)
        assert len(responses.calls) == 1

    # --------------------------------------------------------
    # Test 2: Send only required fake data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_delete_dialog_node_required_response(self):
        payload = self.construct_required_body()
        send_request(self, payload, fake_response__json)
        assert len(responses.calls) == 1

    # --------------------------------------------------------
    # Test 3: Send empty data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_delete_dialog_node_empty(self):
        check_empty_required_params(self, fake_response__json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # -----------
    # - Helpers -
    # -----------
    def make_url(self, body):
        """Build the endpoint URL from the path parameters in *body*."""
        endpoint = "/v1/workspaces/{0}/dialog_nodes/{1}".format(
            body["workspace_id"], body["dialog_node"]
        )
        return "{0}{1}".format(base_url, endpoint)

    def add_mock_response(self, url, response):
        # DELETE replies carry no JSON payload, hence the empty content type.
        responses.add(
            responses.DELETE,
            url,
            body=json.dumps(response),
            status=200,
            content_type="",
        )

    def call_service(self, body):
        """Invoke delete_dialog_node against the mocked service."""
        client = AssistantV1(
            authenticator=NoAuthAuthenticator(),
            version="2019-02-28",
        )
        client.set_service_url(base_url)
        return client.delete_dialog_node(**body)

    def construct_full_body(self):
        return {"workspace_id": "string1", "dialog_node": "string1"}

    def construct_required_body(self):
        return {"workspace_id": "string1", "dialog_node": "string1"}
# endregion
##############################################################################
# End of Service: DialogNodes
##############################################################################
##############################################################################
# Start of Service: Logs
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for list_logs
# -----------------------------------------------------------------------------
class TestListLogs:
    """Generated tests for AssistantV1.list_logs."""

    # --------------------------------------------------------
    # Test 1: Send fake data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_list_logs_response(self):
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_LogCollection_json)
        assert len(responses.calls) == 1

    # --------------------------------------------------------
    # Test 2: Send only required fake data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_list_logs_required_response(self):
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_LogCollection_json)
        assert len(responses.calls) == 1

    # --------------------------------------------------------
    # Test 3: Send empty data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_list_logs_empty(self):
        check_empty_required_params(self, fake_response_LogCollection_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # -----------
    # - Helpers -
    # -----------
    def make_url(self, body):
        """Build the endpoint URL from the workspace id in *body*."""
        endpoint = "/v1/workspaces/{0}/logs".format(body["workspace_id"])
        return "{0}{1}".format(base_url, endpoint)

    def add_mock_response(self, url, response):
        """Register *response* as the mock GET reply for *url*."""
        responses.add(
            responses.GET,
            url,
            body=json.dumps(response),
            status=200,
            content_type="application/json",
        )

    def call_service(self, body):
        """Invoke list_logs against the mocked service."""
        client = AssistantV1(
            authenticator=NoAuthAuthenticator(),
            version="2019-02-28",
        )
        client.set_service_url(base_url)
        return client.list_logs(**body)

    def construct_full_body(self):
        return {
            "workspace_id": "string1",
            "sort": "string1",
            "filter": "string1",
            "page_limit": 12345,
            "cursor": "string1",
        }

    def construct_required_body(self):
        return {"workspace_id": "string1"}
# -----------------------------------------------------------------------------
# Test Class for list_all_logs
# -----------------------------------------------------------------------------
class TestListAllLogs:
    """Generated tests for AssistantV1.list_all_logs."""

    # --------------------------------------------------------
    # Test 1: Send fake data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_list_all_logs_response(self):
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_LogCollection_json)
        assert len(responses.calls) == 1

    # --------------------------------------------------------
    # Test 2: Send only required fake data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_list_all_logs_required_response(self):
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_LogCollection_json)
        assert len(responses.calls) == 1

    # --------------------------------------------------------
    # Test 3: Send empty data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_list_all_logs_empty(self):
        check_empty_required_params(self, fake_response_LogCollection_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # -----------
    # - Helpers -
    # -----------
    def make_url(self, body):
        # This endpoint takes no path parameters; *body* is unused by design.
        endpoint = "/v1/logs"
        return "{0}{1}".format(base_url, endpoint)

    def add_mock_response(self, url, response):
        """Register *response* as the mock GET reply for *url*."""
        responses.add(
            responses.GET,
            url,
            body=json.dumps(response),
            status=200,
            content_type="application/json",
        )

    def call_service(self, body):
        """Invoke list_all_logs against the mocked service."""
        client = AssistantV1(
            authenticator=NoAuthAuthenticator(),
            version="2019-02-28",
        )
        client.set_service_url(base_url)
        return client.list_all_logs(**body)

    def construct_full_body(self):
        return {
            "filter": "string1",
            "sort": "string1",
            "page_limit": 12345,
            "cursor": "string1",
        }

    def construct_required_body(self):
        return {"filter": "string1"}
# endregion
##############################################################################
# End of Service: Logs
##############################################################################
##############################################################################
# Start of Service: UserData
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for delete_user_data
# -----------------------------------------------------------------------------
class TestDeleteUserData:
    """Generated tests for AssistantV1.delete_user_data."""

    # --------------------------------------------------------
    # Test 1: Send fake data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_delete_user_data_response(self):
        payload = self.construct_full_body()
        send_request(self, payload, fake_response__json)
        assert len(responses.calls) == 1

    # --------------------------------------------------------
    # Test 2: Send only required fake data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_delete_user_data_required_response(self):
        payload = self.construct_required_body()
        send_request(self, payload, fake_response__json)
        assert len(responses.calls) == 1

    # --------------------------------------------------------
    # Test 3: Send empty data and check response
    # --------------------------------------------------------
    @responses.activate
    def test_delete_user_data_empty(self):
        check_empty_required_params(self, fake_response__json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # -----------
    # - Helpers -
    # -----------
    def make_url(self, body):
        # This endpoint takes no path parameters; *body* is unused by design.
        endpoint = "/v1/user_data"
        return "{0}{1}".format(base_url, endpoint)

    def add_mock_response(self, url, response):
        # The service answers 202 Accepted with no payload for this call.
        responses.add(
            responses.DELETE,
            url,
            body=json.dumps(response),
            status=202,
            content_type="",
        )

    def call_service(self, body):
        """Invoke delete_user_data against the mocked service."""
        client = AssistantV1(
            authenticator=NoAuthAuthenticator(),
            version="2019-02-28",
        )
        client.set_service_url(base_url)
        return client.delete_user_data(**body)

    def construct_full_body(self):
        return {"customer_id": "string1"}

    def construct_required_body(self):
        return {"customer_id": "string1"}
# endregion
##############################################################################
# End of Service: UserData
##############################################################################
def check_empty_required_params(obj, response):
    """Assert that the operation raises ValueError when every required
    parameter is explicitly set to None.

    Args:
        obj: The generated test class instance
        response: Mock response body to register for the (rejected) request
    """
    # Keep the full set of parameter names but null out every value.
    body = {k: None for k in obj.construct_full_body()}
    error = False
    try:
        send_request(obj, body, response)
    except ValueError:
        error = True
    assert error
def check_missing_required_params(obj):
    """Assert that the operation raises TypeError when required parameters
    are omitted entirely.

    Args:
        obj: The generated test class instance
    """
    # The URL is built from the full body; the call itself gets no arguments.
    body = obj.construct_full_body()
    url = obj.make_url(body)
    error = False
    try:
        send_request(obj, {}, {}, url=url)
    except TypeError:
        error = True
    assert error
def check_empty_response(obj):
    """Send an empty request body and verify the mocked round trip succeeds.

    Args:
        obj: The generated test class instance
    """
    full_body = obj.construct_full_body()
    send_request(obj, {}, {}, url=obj.make_url(full_body))
def send_request(obj, body, response, url=None):
    """Create a request, send it, and assert its accuracy to the mock response.

    Args:
        obj: The generated test class instance
        body: Dict filled with fake data for calling the service
        response: Mock response body registered for the request and compared
            against the service output
        url: Optional explicit request URL; derived from *body* via
            ``obj.make_url`` when omitted
    """
    if not url:
        url = obj.make_url(body)
    obj.add_mock_response(url, response)
    output = obj.call_service(body)
    # startswith (not ==) because the client may append query parameters.
    assert responses.calls[0].request.url.startswith(url)
    assert output.get_result() == response
####################
## Mock Responses ##
####################
# Each constant below is assigned exactly once; the generator had emitted
# several byte-identical duplicate assignments, which were dead code.
fake_response__json = None
fake_response_MessageResponse_json = """{"input": {"text": "fake_text"}, "intents": [], "entities": [], "alternate_intents": false, "context": {"conversation_id": "fake_conversation_id", "system": {}, "metadata": {"deployment": "fake_deployment", "user_id": "fake_user_id"}}, "output": {"nodes_visited": [], "nodes_visited_details": [], "log_messages": [], "text": [], "generic": []}, "actions": []}"""
fake_response_WorkspaceCollection_json = """{"workspaces": [], "pagination": {"refresh_url": "fake_refresh_url", "next_url": "fake_next_url", "total": 5, "matched": 7, "refresh_cursor": "fake_refresh_cursor", "next_cursor": "fake_next_cursor"}}"""
fake_response_Workspace_json = """{"name": "fake_name", "description": "fake_description", "language": "fake_language", "learning_opt_out": true, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "fake_prompt", "none_of_the_above_prompt": "fake_none_of_the_above_prompt", "enabled": false, "sensitivity": "fake_sensitivity", "randomize": false, "max_suggestions": 15, "suggestion_text_policy": "fake_suggestion_text_policy"}, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}}, "workspace_id": "fake_workspace_id", "status": "fake_status", "created": "2017-05-16T13:56:54.957Z", "updated": "2017-05-16T13:56:54.957Z", "intents": [], "entities": [], "dialog_nodes": [], "counterexamples": [], "webhooks": []}"""
fake_response_IntentCollection_json = """{"intents": [], "pagination": {"refresh_url": "fake_refresh_url", "next_url": "fake_next_url", "total": 5, "matched": 7, "refresh_cursor": "fake_refresh_cursor", "next_cursor": "fake_next_cursor"}}"""
fake_response_Intent_json = """{"intent": "fake_intent", "description": "fake_description", "created": "2017-05-16T13:56:54.957Z", "updated": "2017-05-16T13:56:54.957Z", "examples": []}"""
fake_response_ExampleCollection_json = """{"examples": [], "pagination": {"refresh_url": "fake_refresh_url", "next_url": "fake_next_url", "total": 5, "matched": 7, "refresh_cursor": "fake_refresh_cursor", "next_cursor": "fake_next_cursor"}}"""
fake_response_Example_json = """{"text": "fake_text", "mentions": [], "created": "2017-05-16T13:56:54.957Z", "updated": "2017-05-16T13:56:54.957Z"}"""
fake_response_CounterexampleCollection_json = """{"counterexamples": [], "pagination": {"refresh_url": "fake_refresh_url", "next_url": "fake_next_url", "total": 5, "matched": 7, "refresh_cursor": "fake_refresh_cursor", "next_cursor": "fake_next_cursor"}}"""
fake_response_Counterexample_json = """{"text": "fake_text", "created": "2017-05-16T13:56:54.957Z", "updated": "2017-05-16T13:56:54.957Z"}"""
fake_response_EntityCollection_json = """{"entities": [], "pagination": {"refresh_url": "fake_refresh_url", "next_url": "fake_next_url", "total": 5, "matched": 7, "refresh_cursor": "fake_refresh_cursor", "next_cursor": "fake_next_cursor"}}"""
fake_response_Entity_json = """{"entity": "fake_entity", "description": "fake_description", "fuzzy_match": false, "created": "2017-05-16T13:56:54.957Z", "updated": "2017-05-16T13:56:54.957Z", "values": []}"""
fake_response_EntityMentionCollection_json = """{"examples": [], "pagination": {"refresh_url": "fake_refresh_url", "next_url": "fake_next_url", "total": 5, "matched": 7, "refresh_cursor": "fake_refresh_cursor", "next_cursor": "fake_next_cursor"}}"""
fake_response_ValueCollection_json = """{"values": [], "pagination": {"refresh_url": "fake_refresh_url", "next_url": "fake_next_url", "total": 5, "matched": 7, "refresh_cursor": "fake_refresh_cursor", "next_cursor": "fake_next_cursor"}}"""
fake_response_Value_json = """{"value": "fake_value", "type": "fake_type", "synonyms": [], "patterns": [], "created": "2017-05-16T13:56:54.957Z", "updated": "2017-05-16T13:56:54.957Z"}"""
fake_response_SynonymCollection_json = """{"synonyms": [], "pagination": {"refresh_url": "fake_refresh_url", "next_url": "fake_next_url", "total": 5, "matched": 7, "refresh_cursor": "fake_refresh_cursor", "next_cursor": "fake_next_cursor"}}"""
fake_response_Synonym_json = """{"synonym": "fake_synonym", "created": "2017-05-16T13:56:54.957Z", "updated": "2017-05-16T13:56:54.957Z"}"""
fake_response_DialogNodeCollection_json = """{"dialog_nodes": [], "pagination": {"refresh_url": "fake_refresh_url", "next_url": "fake_next_url", "total": 5, "matched": 7, "refresh_cursor": "fake_refresh_cursor", "next_cursor": "fake_next_cursor"}}"""
fake_response_DialogNode_json = """{"dialog_node": "fake_dialog_node", "description": "fake_description", "conditions": "fake_conditions", "parent": "fake_parent", "previous_sibling": "fake_previous_sibling", "output": {"generic": [], "modifiers": {"overwrite": false}}, "next_step": {"behavior": "fake_behavior", "dialog_node": "fake_dialog_node", "selector": "fake_selector"}, "title": "fake_title", "type": "fake_type", "event_name": "fake_event_name", "variable": "fake_variable", "actions": [], "digress_in": "fake_digress_in", "digress_out": "fake_digress_out", "digress_out_slots": "fake_digress_out_slots", "user_label": "fake_user_label", "disambiguation_opt_out": true, "disabled": true, "created": "2017-05-16T13:56:54.957Z", "updated": "2017-05-16T13:56:54.957Z"}"""
fake_response_LogCollection_json = """{"logs": [], "pagination": {"next_url": "fake_next_url", "matched": 7, "next_cursor": "fake_next_cursor"}}"""
| StarcoderdataPython |
class Solution:
    def longestPalindrome(self, s):
        """Return the longest palindromic substring of *s*.

        Uses the expand-around-center technique: every index (and every gap
        between adjacent indices) is treated as a potential palindrome
        center, which is grown outward while the boundary characters match.
        O(n^2) time, O(1) extra space.

        The previous brute-force version checked every substring between
        equal characters (worst case ~O(n^3)) and iterated over a ``set`` of
        characters, so its answer for ties depended on hash-randomized set
        order; this version is deterministic.

        :type s: str
        :rtype: str
        """
        if not s:
            return ""

        def _expand(left, right):
            # Grow outward while characters match; return slice bounds of
            # the widest palindrome centered at (left, right).
            while left >= 0 and right < len(s) and s[left] == s[right]:
                left -= 1
                right += 1
            return left + 1, right

        best_start, best_end = 0, 1  # any single character is a palindrome
        for center in range(len(s)):
            # Odd-length center at `center`, even-length center in the gap.
            for lo, hi in (_expand(center, center), _expand(center, center + 1)):
                if hi - lo > best_end - best_start:
                    best_start, best_end = lo, hi
        return s[best_start:best_end]
| StarcoderdataPython |
from re import Match
from typing import Any, Dict, Iterable, Union
import confuse
import pytest
from .conventional_commits import ConventionalCommitParser
# Subject lines that must NOT match the conventional-commit subject pattern.
INVALID_SUBJECT_REGEX = [
    ("spa ce: Type with space"),
    ("type(spa ce): Scope with space"),
    ("type !: Space between type and bang"),
    ("type(scope) !: Space between scope and bang"),
    ("type (scope): Space between type and scope"),
    ("type( scope): Leading space in scope"),
    ("type(scope ): Trailing space in scope"),
    ("type( scope ): Leading and trailing space in scope"),
    ("type:Missing space after colon"),
]
# Footer lines whose keys are malformed and must yield no footer matches.
INVALID_FOOTER_REGEX = [
    ("Key : Value"),
    ("Key # Value"),
    ("Key with space: Value"),
    ("Key with space #Value"),
]
# (subject line, expected named groups) pairs for the subject pattern.
VALID_SUBJECT_REGEX = [
    (
        "type: Description",
        {"type": "type", "scope": None, "breaking": None, "message": "Description"},
    ),
    (
        "type!: Breaking Change",
        {"type": "type", "scope": None, "breaking": "!", "message": "Breaking Change"},
    ),
    (
        "type(scope): Scoped Change",
        {
            "type": "type",
            "scope": "scope",
            "breaking": None,
            "message": "Scoped Change",
        },
    ),
    (
        "type(scope)!: Scoped Breaking Change",
        {
            "type": "type",
            "scope": "scope",
            "breaking": "!",
            "message": "Scoped Breaking Change",
        },
    ),
    (
        "hyp-hen: Hyphenated Type",
        {
            "type": "hyp-hen",
            "scope": None,
            "breaking": None,
            "message": "Hyphenated Type",
        },
    ),
    (
        "type(hyp-hen): Hyphenated Scope",
        {
            "type": "type",
            "scope": "hyp-hen",
            "breaking": None,
            "message": "Hyphenated Scope",
        },
    ),
]
# (body text, expected content/footer split) pairs for the body pattern.
VALID_BODY_REGEX = [
    (
        "A body\nwith no footers",
        {"content": "A body\nwith no footers", "footer": None},
    ),
    ("Footer-Only: True", {"content": None, "footer": "Footer-Only: True",},),
    (
        "A body\n\nAnd a second\nparagraph",
        {"content": "A body\n\nAnd a second\nparagraph", "footer": None},
    ),
    (
        "A body with a footer\n\nFooter: value",
        {"content": "A body with a footer", "footer": "Footer: value"},
    ),
    (
        "A body with a different kind of footer\n\nFooter #Value",
        {
            "content": "A body with a different kind of footer",
            "footer": "Footer #Value",
        },
    ),
    (
        "A body with a breaking change in the footer section\n\nBREAKING CHANGE: A description of the change",
        {
            "content": "A body with a breaking change in the footer section",
            "footer": "BREAKING CHANGE: A description of the change",
        },
    ),
    (
        "A body with a footer\n\nAnd a second paragraph\n\nFooter: value",
        {
            "content": "A body with a footer\n\nAnd a second paragraph",
            "footer": "Footer: value",
        },
    ),
    (
        "A body with multiple footer\n\nOne: value\n\nTwo: value",
        {
            "content": "A body with multiple footer",
            "footer": "One: value\n\nTwo: value",
        },
    ),
    (
        "A body with a multiline footer\n\nOne: value\nand some more",
        {
            "content": "A body with a multiline footer",
            "footer": "One: value\nand some more",
        },
    ),
    # Edge-case tests
    (
        "A body with a single footer that looks like two\n\nOne: value\nTwo: value",
        {
            "content": "A body with a single footer that looks like two",
            "footer": "One: value\nTwo: value",
        },
    ),
    (
        "A body with a footer containing a hyphen\n\nFooter-With-Hyphen: value",
        {
            "content": "A body with a footer containing a hyphen",
            "footer": "Footer-With-Hyphen: value",
        },
    ),
    (
        "A body, but no footer as there is only one newline\nFooter: value",
        {
            "content": "A body, but no footer as there is only one newline\nFooter: value",
            "footer": None,
        },
    ),
    (
        "A body, but no footer as it has an invalid key\n\nFooter : value",
        {
            "content": "A body, but no footer as it has an invalid key\n\nFooter : value",
            "footer": None,
        },
    ),
    (
        "A body, but no footer as it has an invalid key\n\nFooter # value",
        {
            "content": "A body, but no footer as it has an invalid key\n\nFooter # value",
            "footer": None,
        },
    ),
]
# (footer text, expected list of key/value group dicts) for the footer pattern.
VALID_FOOTER_REGEX = [
    ("Key: Value", [{"key": "Key", "value": "Value"}]),
    ("Key #Value", [{"key": "Key", "value": "Value"}]),
    ("BREAKING CHANGE: Value", [{"key": "BREAKING CHANGE", "value": "Value"}]),
    ("Key: Line one\nLine two", [{"key": "Key", "value": "Line one\nLine two"}]),
    (
        "Key: Line one\nKey # Line two",
        [{"key": "Key", "value": "Line one\nKey # Line two"}],
    ),
    (
        "Key-One: Value-One\nKey-Two: Value-Two",
        [
            {"key": "Key-One", "value": "Value-One"},
            {"key": "Key-Two", "value": "Value-Two"},
        ],
    ),
    (
        "Key-One: Value-One\nKey-Two: Value-Two",
        [
            {"key": "Key-One", "value": "Value-One"},
            {"key": "Key-Two", "value": "Value-Two"},
        ],
    ),
    (
        "Key-One: Value-One\n\nKey-Two: Value-Two",
        [
            {"key": "Key-One", "value": "Value-One\n"},
            {"key": "Key-Two", "value": "Value-Two"},
        ],
    ),
    (
        "Key-One: Value-One\n\nParagraph two\n\nKey-Two: Value-Two",
        [
            {"key": "Key-One", "value": "Value-One\n\nParagraph two\n"},
            {"key": "Key-Two", "value": "Value-Two"},
        ],
    ),
    # Edge-case tests
    ("Single-Character-Value: A", [{"key": "Single-Character-Value", "value": "A"}]),
]
# (partially parsed commit dict, expected dict after post_process) pairs;
# post_process derives the `metadata` section (breaking flag, closed issues).
POST_PROCESS_DATA = [
    ({}, {"metadata": {"breaking": False, "closes": []}}),
    (
        {"subject": {"breaking": "!"}},
        {"subject": {"breaking": "!"}, "metadata": {"breaking": True, "closes": []}},
    ),
    (
        {"subject": {"breaking": None}},
        {"subject": {"breaking": None}, "metadata": {"breaking": False, "closes": []}},
    ),
    (
        {
            "body": {
                "footer": {"items": [{"key": "BREAKING CHANGE", "value": "Anything"}]}
            }
        },
        {
            "metadata": {"breaking": True, "closes": []},
            "body": {
                "footer": {"items": [{"key": "BREAKING CHANGE", "value": "Anything"}]}
            },
        },
    ),
    (
        {
            "body": {
                "footer": {
                    "items": [
                        {"key": "Closes", "value": "A-TICKET"},
                        {"key": "Refs", "value": "B-TICKET"},
                    ]
                }
            }
        },
        {
            "metadata": {"breaking": False, "closes": ["A-TICKET", "B-TICKET"]},
            "body": {
                "footer": {
                    "items": [
                        {"key": "Closes", "value": "A-TICKET"},
                        {"key": "Refs", "value": "B-TICKET"},
                    ]
                }
            },
        },
    ),
]
@pytest.mark.parametrize("text", INVALID_SUBJECT_REGEX)
def test_invalid_subject_regex(text: str) -> None:
config = confuse.Configuration("Conventional", "conventional", read=False)
config.read(user=False)
config["parser"]["config"]["types"] = ["type", "hyp-hen"]
parser = ConventionalCommitParser(config["parser"]["config"])
actual = parser.get_parsers()["subject"](text)
assert actual is None
@pytest.mark.parametrize("text", INVALID_FOOTER_REGEX)
def test_invalid_footer_regex(text: str) -> None:
config = confuse.Configuration("Conventional", "conventional", read=False)
config.read(user=False)
config["parser"]["config"]["types"] = ["type", "hyp-hen"]
parser = ConventionalCommitParser(config["parser"]["config"])
actual = parser.get_parsers()["footer"](text)
assert isinstance(actual, Iterable)
actual_data = list(actual)
assert len(actual_data) == 0
@pytest.mark.parametrize("text, expected", VALID_SUBJECT_REGEX)
def test_valid_subject_regex(
text: str, expected: Union[Dict, Iterable[Dict], None]
) -> None:
config = confuse.Configuration("Conventional", "conventional", read=False)
config.read(user=False)
config["parser"]["config"]["types"] = ["type", "hyp-hen"]
parser = ConventionalCommitParser(config["parser"]["config"])
actual = parser.get_parsers()["subject"](text)
assert isinstance(actual, Match)
assert actual.groupdict() == expected
@pytest.mark.parametrize("text, expected", VALID_BODY_REGEX)
def test_valid_body_regex(
text: str, expected: Union[Dict, Iterable[Dict], None]
) -> None:
config = confuse.Configuration("Conventional", "conventional", read=False)
config.read(user=False)
config["parser"]["config"]["types"] = ["type", "hyp-hen"]
parser = ConventionalCommitParser(config["parser"]["config"])
actual = parser.get_parsers()["body"](text)
assert isinstance(actual, Match)
assert actual.groupdict() == expected
@pytest.mark.parametrize("text, expected", VALID_FOOTER_REGEX)
def test_valid_footer_regex(
    text: str, expected: Union[Dict, Iterable[Dict], None]
) -> None:
    """Each footer line is matched and its groups collected in order."""
    config = confuse.Configuration("Conventional", "conventional", read=False)
    config.read(user=False)
    config["parser"]["config"]["types"] = ["type", "hyp-hen"]
    footer_parser = ConventionalCommitParser(config["parser"]["config"]).get_parsers()["footer"]
    matches = footer_parser(text)
    assert isinstance(matches, Iterable)
    assert [match.groupdict() for match in matches] == expected
@pytest.mark.parametrize("data, expected", POST_PROCESS_DATA)
def test_post_process(data: Dict[str, Any], expected: Dict[str, Any]) -> None:
    """post_process mutates the parsed-commit dict into its final shape."""
    config = confuse.Configuration("Conventional", "conventional", read=False)
    config.read(user=False)
    config["parser"]["config"]["types"] = ["type", "hyp-hen"]
    parser = ConventionalCommitParser(config["parser"]["config"])
    # Copy first: post_process works in place and the parametrized data
    # object must stay untouched for other test runs.
    mutated = data.copy()
    parser.post_process(mutated)
    assert mutated == expected
| StarcoderdataPython |
4915613 | <reponame>qenett/pandas-intro-clone
import pandas
# Load the cities table; the "mesto" (city) column becomes the index.
mesta = pandas.read_csv("mesta.csv", index_col="mesto", encoding="utf-8")
# ## 2. Basic selection
# Values in the table are selected mostly through the `loc` "method".
# ### Selection by row and column labels
# #### 1. Rows only
print(mesta.loc["brno"])
print(mesta.loc[["brno", "praha", "ostrava"]])
# In the first case we got a result oriented vertically, in the second one
# horizontally. The first time we asked for data about 1 city and got a
# so-called `Series`; the second time we asked about several cities and got a
# `DataFrame` (a subset of the original `mesta` DataFrame).
#
# We could also explicitly ask for the result for 1 city as a DataFrame.
print(mesta.loc[["brno"]])
# A range can be used as well (it naturally depends on the order in which the data is stored).
print(mesta.loc["most":"praha"])
# Mind the order.
print(mesta.loc["praha":"most"])
print(mesta.loc["most":"praha":2])
print(mesta.loc[:"most"])
print(mesta.loc["most":])
# When does this make sense and when does it not?
# #### 2. Columns only
print(mesta.loc[:, "kraj"])
print(mesta.loc[:, ["kraj", "linky"]])
# Shortened notation.
print(mesta["kraj"])
print(mesta[["kraj", "linky"]])
# #### 3. Rows and columns
print(mesta.loc["plzen", "linky"])
print(mesta.loc["most":"plzen", "obyvatel"])
print(mesta.loc[["most", "brno", "praha"], ["obyvatel", "vymera"]])
# ### Selection by row and column positions
# As a reminder.
print(mesta)
print(mesta.iloc[2])
print(mesta.iloc[[2]])
print(mesta.iloc[2, 1:])
print(mesta.iloc[[2, 3, 5], [0, 1]])
| StarcoderdataPython |
class AnimalTerrestre:
    """A land animal identified by its name and height."""

    def __init__(self, nome, altura):
        # Public attributes, kept with their original (Portuguese) names.
        self.nome = nome
        self.altura = altura

    def comer(self):
        """Print the land animal's eating message."""
        print("O animal terrestre comeu")

    def andar(self):
        """Print the land animal's walking message."""
        print("O animal terrestre andou")
class AnimalAquatico:
    """A water animal identified by its name and species."""

    def __init__(self, nome, especie):
        # Public attributes, kept with their original (Portuguese) names.
        self.nome = nome
        self.especie = especie

    def comer(self):
        """Print the water animal's eating message."""
        print("O animal aquatico comeu")

    def nadar(self):
        """Print the water animal's swimming message."""
        print("O animal aquatico nadou")
class Anfibio(AnimalAquatico, AnimalTerrestre):
    """Amphibian: inherits from both aquatic and land animals.

    Bug fix: the original ``super().__init__(nome)`` resolves (via the MRO)
    to ``AnimalAquatico.__init__``, which also requires ``especie`` and so
    raised TypeError on every construction.  Both parent initializers are
    now called explicitly, and ``especie`` is accepted as an optional fourth
    argument so existing 4-argument calls (see the script below) work.
    """

    def __init__(self, nome, peso, altura, especie=None):
        # Explicit parent calls: each base needs different arguments, so a
        # single bare super() call cannot initialize both.
        AnimalTerrestre.__init__(self, nome, altura)
        AnimalAquatico.__init__(self, nome, especie)
        self.peso = peso
# NOTE(review): with Anfibio.__init__ as written above (three parameters plus
# a bare super().__init__(nome)), this four-argument call raises TypeError —
# super() resolves to AnimalAquatico, whose __init__ also needs 'especie'.
# TODO confirm the intended constructor signature.
anfibio1 = Anfibio("sapo", "100g", "10cm", "anfibio")
anfibio1.andar()
anfibio1.comer() # resolved via the MRO: AnimalAquatico is listed first, so its comer() runs
anfibio1.nadar()
| StarcoderdataPython |
129055 | import numpy as np
def parse_res(res):
    """Parse a result string into a (best, average) rank pair.

    ``res`` is either an empty string (no result), a single integer, or a
    comma-separated list of integers.

    :param res: raw result string.
    :return: tuple ``(minimum rank, mean rank)``; ``(-1, -1)`` when ``res``
        is empty.  The mean is a float when several values are present.
    """
    if res == "":
        return -1, -1
    if "," not in res:
        # Single value: the best and the average coincide.
        value = int(res)
        return value, value
    # Avoid shadowing the builtins `min`/`all` (the original did both).
    ranks = np.array([int(r) for r in res.split(",")])
    return np.min(ranks), np.mean(ranks)
def get_static_final(projects, ver, result_list):
    """Aggregate per-version ranking results for each project.

    For every project, counts how many versions ranked the target within the
    top 1 / 3 / 5, and averages the first and mean ranks over the versions
    that produced a valid result.

    Cleanup: the original also built `venn_result`, `ranks_min` and
    `ranks_avg`, none of which were ever read or returned — they are removed.

    :param projects: sequence of project identifiers (only its length is used).
    :param ver: unused; kept for interface compatibility with callers.
    :param result_list: per-project lists of result strings accepted by
        :func:`parse_res`.
    :return: tuple ``(final_result, true_ver)`` — an object array with one
        ``(top1, top3, top5, mean_first_rank, mean_avg_rank)`` row per
        project, and the per-project count of versions with a valid result.
    """
    final_result = []
    true_ver = []
    for index in range(len(projects)):
        results_by_proj = result_list[index]
        tops = np.zeros(3)
        ranks = np.zeros(2)
        valid_versions = 0
        for res in results_by_proj:
            if res == "":
                continue
            best, mean_rank = parse_res(res)
            if best == -1:
                continue
            # Cumulative top-N hit counters (top-1 implies top-3 and top-5).
            if best <= 1:
                tops[0] += 1
            if best <= 3:
                tops[1] += 1
            if best <= 5:
                tops[2] += 1
            ranks[0] += best
            ranks[1] += mean_rank
            valid_versions += 1
        true_ver.append(valid_versions)
        if valid_versions == 0:
            ranks = [0, 0]
        else:
            ranks = ranks / valid_versions
        row = (int(tops[0]), int(tops[1]), int(tops[2]),
               round(float(ranks[0]), 2), round(float(ranks[1]), 2),)
        final_result.append(np.array(row, dtype=object))
    return np.array(final_result), true_ver
def get_final(final_result, true_ver):
    """Combine per-project rows into overall top-N sums and weighted means.

    Columns 0-2 (top-1/3/5 counts) are summed; columns 3-4 (mean ranks) are
    averaged weighted by the number of valid versions per project.
    """
    top_counts = final_result[:, 0:3]
    mean_cols = final_result[:, 3:5]
    summed_tops = np.sum(top_counts, 0)
    weights = np.transpose(true_ver).reshape(len(true_ver), 1)
    weighted_means = np.sum(mean_cols * weights, 0) / np.sum(true_ver)
    return np.concatenate((summed_tops, weighted_means))
6505531 | <gh_stars>1-10
import time
import logging
import requests
from crizzle.services.base import Service as BaseService
# Use getLogger() so the logger is registered with the logging manager and
# picks up application-level configuration; instantiating Logger() directly
# bypasses the hierarchy entirely.
logger = logging.getLogger(__name__)
class Service(BaseService):
    """
    Service wrapper for the Poloniex exchange REST API.

    Builds requests against https://poloniex.com/ and delegates shared
    plumbing (default parameters, key storage) to ``BaseService``.
    """
    def sign_request_data(self, params=None, data=None, headers=None):
        # NOTE(review): signing is not implemented — requests sent with
        # sign=True are effectively unauthenticated.
        pass
    def add_api_key(self, params=None, data=None, headers=None):
        # NOTE(review): no-op — the API key is never attached to the request.
        pass
    def __init__(self, key_file=None):
        # Register the service name, API root and optional key file with the
        # base class.
        super(Service, self).__init__('Poloniex', 'https://poloniex.com/', key=key_file)
    # region Request methods
    def request(self, request_type: str, endpoint: str, params=None,
                api_version=None, data=None, headers=None, sign=False):
        """Send an HTTP request to *endpoint* and return the raw response.

        :param request_type: one of 'get', 'post', 'put', 'delete'.
        :param endpoint: path appended after the API-version URL segment.
        :param api_version: formatted into the URL as ``/<version>/<endpoint>``.
        :param sign: when True, sign_request_data() is invoked (currently a
            no-op — see above).
        :return: the ``requests.Response`` from the exchange.
        """
        # Caller-supplied params are merged over the base-class defaults.
        final_params = self.get_params()
        if params is not None:
            final_params.update(params)
        logger.debug('Querying {}'.format(self.name))
        # Method is None for now; it is filled in below after validation.
        request = requests.Request(None, self.root + "/{}/".format(api_version) + endpoint,
                                   params=final_params, data=data, headers=headers)
        self.add_api_key(request)
        with requests.Session() as session:
            if request_type in ('get', 'post', 'put', 'delete'):
                request.method = request_type.upper()
            else:
                # NOTE(review): an invalid type is only logged; request.method
                # stays None and prepare() below will then fail.
                logger.critical('invalid request type {}'.format(request_type))
            if sign:
                self.sign_request_data(request, None, None)
            prepped = request.prepare()
            response = session.send(prepped)
            return response
| StarcoderdataPython |
9725554 | import csv
# Directory layout used by the helpers below.
INPUT_DIR = 'Input/'
OUTPUT_DIR = 'Output/'
UTILITIES_DIR = 'Utilities/'
# Filled in place by read_verbs(); each entry lists one verb's tenses.
VERBS_LIST = []
def read_data(filename):
    """Read a '|'-separated QA file and keep four fields per data row.

    File columns: #0 ArticleTitle, #1 Question, #2 Answer,
    #3 DifficultyFromQuestioner, #4 DifficultyFromAnswerer, #5 ArticleFile.
    The header line is skipped; only columns 0, 1, 2 and 5 are kept.

    Fixes over the original: the file handle is managed with a context
    manager (it was closed twice, and leaked on exception), the two-pass
    build is merged into one, and blank lines no longer raise IndexError.

    :param filename: Name of the file to read.
    :type filename: str
    :return: One (title, question, answer, article_file) tuple per data row.
    :rtype: list(tuple)
    """
    quadruples = []
    with open(filename, 'r', encoding='ISO-8859-1') as file:
        next(file, None)  # skip the header line
        for raw_line in file:
            line = raw_line.rstrip()
            if not line:
                continue  # tolerate trailing blank lines
            row = line.split('|')
            quadruples.append((row[0], row[1], row[2], row[5]))
    return quadruples
def read_article(article_location):
    """Read an article file, dropping annotation lines.

    Lines starting with ``*`` or ``#`` are treated as markup/annotations and
    skipped; everything else is concatenated unchanged.

    Fixes over the original: the handle is managed with a context manager
    (it was never closed), and ``str.join`` replaces the quadratic ``+=``
    string concatenation.

    :param article_location: The article's file location.
    :type article_location: str
    :return: The filtered article text.
    :rtype: str
    """
    with open(article_location, 'r', encoding='utf-8') as file:
        return ''.join(line for line in file
                       if not line.startswith(('*', '#')))
def read_util_file(filename):
    """Read a comma-separated word list from the Utilities folder.

    These files hold useful word lists kept on disk instead of being
    hard-coded in the program.

    Fix over the original: the handle is managed with a context manager, so
    it is closed even if ``read()`` raises.

    :param filename: The filename inside UTILITIES_DIR.
    :type filename: str
    :return: A list with the words in the file.
    :rtype: list(str)
    """
    with open(UTILITIES_DIR + filename, "r") as file:
        text = file.read()
    return text.split(",")
def read_verbs(filename):
    """Read a CSV of verbs (one verb per row, all of its tenses as cells).

    Empty cells (left by trailing commas) are dropped.  Rows are appended to
    the module-level ``VERBS_LIST`` (original behaviour, preserved) and the
    list is also returned — the original docstring promised a return value
    but the function returned None.

    :param filename: Name of the file inside UTILITIES_DIR.
    :type filename: str
    :rtype: list(list(str))
    """
    file = UTILITIES_DIR + filename
    with open(file, 'r') as f:
        for line in csv.reader(f):
            VERBS_LIST.append([cell for cell in line if cell])
    return VERBS_LIST
| StarcoderdataPython |
8028351 | import rsa
import base64
import random
__all__ = ('passworldEncode', 'randomTemperature', 'getIMEI', 'USER_AGENT', 'USER_AGENT_YIBAN','getCheckData','randomTemperature')
pubkey = '''-----<KEY>
-----END PUBLIC KEY-----'''
key:rsa.PublicKey = rsa.PublicKey.load_pkcs1_openssl_pem(pubkey)
USER_AGENT:str = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
USER_AGENT_YIBAN:str = 'YiBan/5.0 Mozilla/5.0 (Linux; Android 7.1.2; V1938T Build/N2G48C; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/68.0.3440.70 Safari/537.36'
def passworldEncode(pwd: str) -> bytes:
    """RSA-encrypt *pwd* with the module public key, then base64-encode it."""
    ciphertext = rsa.encrypt(pwd.encode(), key)
    return base64.encodebytes(ciphertext)
def randomTemperature() -> str:
    """Return a plausible body temperature string: '36.0' through '36.10'."""
    decimal_part = random.randint(0, 10)
    return f'36.{decimal_part}'
def getIMEI() -> str:
    """Generate a random, checksum-valid 15-digit IMEI starting with '86'.

    Bug fixes: the original ``'86'.join(...)`` inserted '86' *between* every
    sampled digit (producing 34 characters) instead of prefixing it, and
    since :func:`getMod10` already returns the code with its check digit
    appended, ``s + getMod10(s)`` duplicated the whole body.
    """
    body = '86' + ''.join(random.sample('012345678900000', 12))
    # getMod10 returns the 14-digit body with its check digit appended.
    return getMod10(body)
def getCheckData(code: int, temperature: str, loc: str = None) -> dict:
    """Build the submission payload for daily health-check form *code*.

    Supported form ids are 24, 25 and 13; any other id yields None (matching
    the original behaviour).

    :param code: form identifier.
    :param temperature: body temperature string, e.g. '36.5'.
    :param loc: location string; a default campus address is used when None.
    """
    if loc is None:
        loc = '陕西省 西安市 未央区 111县道 111县 靠近陕西科技大学学生生活区'
    if code == 24:
        return {
            '24[0][0][name]': 'form[24][field_1588749561_2922][]',
            '24[0][0][value]': temperature,
            '24[0][1][name]': 'form[24][field_1588749738_1026][]',
            '24[0][1][value]': loc,
            '24[0][2][name]': 'form[24][field_1588749759_6865][]',
            '24[0][2][value]': '是',
            '24[0][3][name]': 'form[24][field_1588749842_2715][]',
            '24[0][3][value]': '否',
            '24[0][4][name]': 'form[24][field_1588749886_2103][]',
            '24[0][4][value]': ''
        }
    if code == 25:
        return {
            '25[0][0][name]': 'form[25][field_1588750276_2934][]',
            '25[0][0][value]': temperature,
            '25[0][1][name]': 'form[25][field_1588750304_5363][]',
            '25[0][1][value]': loc,
            '25[0][2][name]': 'form[25][field_1588750323_2500][]',
            '25[0][2][value]': '是',
            '25[0][3][name]': 'form[25][field_1588750343_3510][]',
            '25[0][3][value]': '否',
            '25[0][4][name]': 'form[25][field_1588750363_5268][]',
            '25[0][4][value]': ''
        }
    if code == 13:
        return {
            '13[0][0][name]': 'form[13][field_1587635120_1722][]',
            '13[0][0][value]': temperature,
            '13[0][1][name]': 'form[13][field_1587635142_8919][]',
            '13[0][1][value]': '正常',
            '13[0][2][name]': 'form[13][field_1587635252_7450][]',
            '13[0][2][value]': loc,
            '13[0][3][name]': 'form[13][field_1587635509_7740][]',
            '13[0][3][value]': '否',
            '13[0][4][name]': 'form[13][field_1587998920_6988][]',
            '13[0][4][value]': '0',
            '13[0][5][name]': 'form[13][field_1587998777_8524][]',
            '13[0][5][value]': '否',
            '13[0][6][name]': 'form[13][field_1587635441_3730][]',
            '13[0][6][value]': ''
        }
    return None
def getPn(n, arr1):
    """P(n) term of the check-digit recurrence; P(1) is the base case."""
    if n == 1:
        return 10
    return mod10(getSn(n - 1, arr1)) * 2
# Special modulo-10: a zero remainder is reported as 10.
def mod10(num):
    """Remainder of *num* modulo 10, mapping 0 to 10."""
    # `x % 10 or 10` is equivalent: a zero remainder falls through to 10.
    return num % 10 or 10
# Compute S(n): P(n) mod 11 plus the digit at the matching reversed position.
def getSn(n, arr1):
    pn_part = getPn(n, arr1) % 11
    digit = int(arr1[14 - n + 1])
    return pn_part + digit
# Compute the check digit (brute force: try 0-9 until the checksum test passes).
def getMod10(code):
    """Return *code* with its check digit appended.

    The code is reversed with an 'x' placeholder next to a trailing ',';
    the candidate digit j replaces the placeholder (index 1 of the reversed
    array) and the first j in 0..9 for which getSn(14) % 10 == 1 is accepted.

    NOTE(review): the return value is the WHOLE code plus the check digit
    (only the trailing ',' is sliced off), so callers should not prepend
    the code again.  Returns None implicitly if no digit passes.
    """
    c = code + 'x,'  # 'x' marks the slot for the digit being searched
    arr1 = []
    for i in reversed(c):
        arr1.append(i)
    for j in range(0, 10):
        arr1[1] = str(j)  # index 1 of the reversed array holds the candidate
        if getSn(14, arr1) % 10 == 1:
            result = ''.join(list(reversed(arr1)))
            return result[:len(result) - 1]  # drop the trailing ','
1925301 | from django.conf import settings
from django.utils.html import escape
from django.utils.http import urlquote
from mediagenerator.utils import media_url
# Pre-built HTML/iframe snippets for the "wide" share widgets.  All templates
# are %-formatted with the dict produced by _get_url_data().
WIDE_TWITTER_BUTTON = """
<iframe src="http://platform.twitter.com/widgets/tweet_button.html?count=horizontal&lang=en&text=%(title)s&url=%(url)s%(opttwitteruser)s" scrolling="no" frameborder="0" allowtransparency="true" style="width: 135px; height: 20px; border: none; overflow: hidden;"></iframe>
"""
FACEBOOK_LIKE_BUTTON = """
<iframe src="http://www.facebook.com/plugins/like.php?href=%(url)s&layout=button_count&show_faces=false&width=100&height=21&action=like&colorscheme=light" style="width: 100px; height: 21px; border: none; overflow: hidden; align: left; margin: 0px 0px 0px 0px;"></iframe>
"""
# The +1 widget takes the raw (un-quoted) URL, hence %(rawurl)s.
PLUS_ONE_BUTTON = """
<div class="g-plusone" data-size="standard" data-count="true" data-href="%(rawurl)s"></div>
"""
# Container divs wrapping the concatenated button markup.
WIDE_BUTTONS_DIV = '<div class="wide-share-buttons" style="overflow:hidden; margin-bottom: 8px;">%s</div>'
NARROW_BUTTONS_DIV = '<div class="narrow-share-buttons" style="overflow:hidden">%s</div>'
# Template for a single 32x32 icon link used by narrow_buttons().
BASE_BUTTON = '<a class="simplesocial" target="_blank" title="%(title)s" style="margin-right:5px;" href="%(url)s"><img src="%(icon)s" alt="%(title)s" width="32" height="32" /></a>'
DEFAULT_TITLE = 'Share on %s'
# Per-service share URLs; an entry is either a plain URL template or a dict
# with optional 'title'/'icon' overrides.
NARROW_BUTTONS = {
    'Twitter': {
        'title': 'Tweet this!',
        'url': 'http://twitter.com/share?text=%(title)s&url=%(url)s%(opttwitteruser)s',
    },
    'Facebook': 'http://www.facebook.com/share.php?u=%(url)s&t=%(title)s',
    'Email': {
        'title': 'Email a friend',
        'url': 'http://feedburner.google.com/fb/a/emailFlare?itemTitle=%(title)s&uri=%(url)s',
    },
    'Delicious': 'http://del.icio.us/post?url=%(url)s&title=%(title)s',
    'Digg': 'http://digg.com/submit?url=%(url)s&title=%(title)s',
    'StumbleUpon': 'http://www.stumbleupon.com/submit?url=%(url)s&title=%(title)s',
    'Reddit': 'http://reddit.com/submit?url=%(url)s&title=%(title)s',
    'Technorati': 'http://technorati.com/faves?sub=favthis&add=%(url)s',
}
# Services rendered by default; overridable via settings.SHOW_SOCIAL_BUTTONS.
SHOW_SOCIAL_BUTTONS = getattr(settings, 'SHOW_SOCIAL_BUTTONS',
                              ('Twitter', 'Facebook', 'Email', 'Delicious', 'StumbleUpon',
                               'Digg', 'Reddit', 'Technorati'))
def narrow_buttons(request, title, url, buttons=SHOW_SOCIAL_BUTTONS):
    """Render the 32x32 icon share links for each service in *buttons*."""
    scheme_suffix = 's' if request.is_secure() else ''
    base_url = 'http%s://%s' % (scheme_suffix, request.get_host())
    data = _get_url_data(request, title, url)
    pieces = []
    for name in buttons:
        spec = NARROW_BUTTONS[name]
        # Entries are either a bare URL template or a dict with overrides.
        if not isinstance(spec, dict):
            spec = {'url': spec}
        button_title = escape(spec.get('title', DEFAULT_TITLE % name))
        button_url = escape(spec['url'] % data)
        icon = escape(spec.get('icon',
                               media_url('simplesocial/icons32/%s.png'
                                         % name.lower())))
        # Relative media URLs get an absolute prefix for external embedding.
        if not icon.startswith(('http://', 'https://')):
            icon = base_url + icon
        pieces.append(BASE_BUTTON % {'title': button_title,
                                     'url': button_url,
                                     'icon': icon})
    return NARROW_BUTTONS_DIV % '\n'.join(pieces)
def wide_buttons(request, title, url,
                 buttons=(WIDE_TWITTER_BUTTON, PLUS_ONE_BUTTON, FACEBOOK_LIKE_BUTTON)):
    """Render the iframe-based share widgets (Tweet, +1, Like)."""
    data = _get_url_data(request, title, url)
    # The twitter via-user fragment is embedded in widget URLs, so escape it.
    data['opttwitteruser'] = escape(data['opttwitteruser'])
    rendered = [template % data for template in buttons]
    return WIDE_BUTTONS_DIV % '\n'.join(rendered)
def _get_url_data(request, title, url):
    """Build the URL-quoted substitution dict shared by the button renderers."""
    scheme = 'https' if request.is_secure() else 'http'
    full_url = '%s://%s%s' % (scheme, request.get_host(), url)
    twitter_username = getattr(settings, 'TWITTER_USERNAME', None)
    data = {
        'url': full_url,
        'title': title,
        'opttwitteruser': twitter_username if twitter_username else '',
    }
    data = dict((key, urlquote(value)) for key, value in data.items())
    if twitter_username:
        data['opttwitteruser'] = '&via=' + data['opttwitteruser']
    # rawurl is added AFTER quoting and stays un-quoted (used by +1 widget).
    data['rawurl'] = full_url
    return data
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.