id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
115603 | <reponame>saji-ryu/pyxel-study<filename>src/lesson2/exercises/b_5_2.py
import pyxel
import math

# Exercise script: draw 360 one-degree "spokes".  For every angle i a line
# is drawn from a fixed anchor point (one anchor per quadrant) to the point
# at angle i on a radius-100 circle centred at (100, 100).
pyxel.init(200, 200)
pyxel.cls(7)  # clear to colour 7 (white)
for i in range(0, 360, 1):
    iRadian = math.radians(i)  # pyxel trig helpers want radians via math
    if(i <= 90):
        # First quadrant: anchor at (120, 80).
        # NOTE(review): anchors are offset from the circle centre (100, 100);
        # presumably intentional for the lesson's visual effect — confirm.
        pyxel.line(120, 80, 100 + 100 * math.cos(iRadian),
                   100 - 100 * math.sin(iRadian), 0)
    elif(i <= 180):
        # Second quadrant: anchor at (80, 80).
        pyxel.line(80, 80, 100 + 100 * math.cos(iRadian),
                   100 - 100 * math.sin(iRadian), 0)
    elif(i <= 270):
        # Third quadrant: anchor at (80, 120).
        pyxel.line(80, 120, 100 + 100 * math.cos(iRadian),
                   100 - 100 * math.sin(iRadian), 0)
    else:
        # Fourth quadrant: anchor at (120, 120).
        pyxel.line(120, 120, 100 + 100 * math.cos(iRadian),
                   100 - 100 * math.sin(iRadian), 0)
pyxel.show()
| StarcoderdataPython |
3247729 | import os
from pathlib import Path
import logging
import click
import numpy as np
import pandas as pd
import SimpleITK as sitk
from src.resampling.utils import (get_np_volume_from_sitk,
get_sitk_volume_from_np)
# Default paths
path_in = 'data/hecktor_nii/'
path_out = 'data/bbox_nii/'
path_bb = 'data/bbox.csv'
@click.command()
@click.argument('input_folder', type=click.Path(exists=True), default=path_in)
@click.argument('output_folder', type=click.Path(), default=path_out)
@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)
def main(input_folder, output_folder, bounding_boxes_file):
    """ This command line interface allows to obtain the bounding boxes
    contained in BOUNDING_BOXES_FILE as a NIFTI mask. The NIFTI files are
    stored in OUTPUT_FOLDER and they contain the value 0 and 1 for outside
    or inside the bounding boxe resepectively.
    INPUT_FOLDER is the path of the folder containing the NIFTI
    in the original reference frame and resolution (the one downloaded
    from AIcrowd):
    OUTPUT_FOLDER is the path of the folder where to store the
    NIFTI files.
    BOUNDING_BOXES_FILE is the path of the .csv file containing the
    bounding boxes of each patient.
    """
    logger = logging.getLogger(__name__)
    logger.info('Starting to write the bb to NIFTI')
    # One row per patient; columns x1..z2 are the physical-space box corners.
    bb_dict = pd.read_csv(bounding_boxes_file).set_index('PatientID')
    sitk_writer = sitk.ImageFileWriter()
    sitk_writer.SetImageIO('NiftiImageIO')
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    # CT file names follow "<PatientID>_ct.nii.gz"; the patient id is the
    # part before the first underscore.
    for f in Path(input_folder).rglob('*_ct.nii.gz'):
        patient_name = f.name.split('_')[0]
        np_volume, spacing, origin = get_np_volume_from_sitk(
            sitk.ReadImage(str(f.resolve())))
        # Convert the two physical-space corners (x1,y1,z1) and (x2,y2,z2)
        # to voxel indices of this volume: subtract the origin and divide
        # by the spacing, component-wise (np.tile repeats origin/spacing
        # for both corners at once).
        bb = np.round((np.asarray([
            bb_dict.loc[patient_name, 'x1'],
            bb_dict.loc[patient_name, 'y1'],
            bb_dict.loc[patient_name, 'z1'],
            bb_dict.loc[patient_name, 'x2'],
            bb_dict.loc[patient_name, 'y2'],
            bb_dict.loc[patient_name, 'z2'],
        ]) - np.tile(origin, 2)) / np.tile(spacing, 2)).astype(int)
        # Binary mask: 1 inside the voxel-space box, 0 elsewhere.
        np_mask_bb = np.zeros_like(np_volume).astype(np.uint8)
        np_mask_bb[bb[0]:bb[3], bb[1]:bb[4], bb[2]:bb[5]] = 1
        sitk_mask_bb = get_sitk_volume_from_np(np_mask_bb, spacing, origin)
        output_filepath = os.path.join(output_folder,
                                       patient_name + "_bbox.nii.gz")
        sitk_writer.SetFileName(output_filepath)
        sitk_writer.Execute(sitk_mask_bb)
        logger.info('{} done'.format(patient_name))
    logger.info('All done!')
if __name__ == '__main__':
    # Configure root logging (and route warnings through it) before
    # handing control to the click command.
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    logging.captureWarnings(True)
    main()
| StarcoderdataPython |
1604264 | <reponame>BiznetGIO/nvc-lite<filename>nvc-api/app/controllers/api/vm.py
from app.helpers.rest import *
from neo.libs import vm
from neo.libs import orchestration as orch
from app.middlewares import auth
from app.helpers.session import *
from app.libs import neo, utils
from flask_restful import Resource, request
class GetListVm(Resource):
    """REST resource listing the VM images visible to the caller's session."""

    @auth.login_required
    def get(self):
        # Resolve the caller's cached session from the Access-Token header,
        # then look up the image list for the session's region.
        nvc_images_data = list()
        redis_data = utils.get_redis(request.headers['Access-Token'])
        nvc_images = utils.parse_nvc_images(redis_data['region'])
        nvc_images = nvc_images[redis_data['region']]
        try:
            nvc_images_data = neo.get_nvc(request.headers['Access-Token'],
                                          nvc_images)
        except Exception as e:
            # Any upstream failure is surfaced to the client as 401 with
            # the raw error text as the message.
            return response(401, message=str(e))
        else:
            return response(200, data=nvc_images_data)
| StarcoderdataPython |
83193 | # File: main.py
#
# Author: <NAME>
# Date: 2018-11-30
import argparse
import sys
import os
import xlsxwriter
import time
from DoodleParser import DoodleParser
from Solver import Solver
# Path of the configuration file read at startup.
CONFIG_FILE = "config.in"
# Global configuration mapping, populated by parse_config_file().
CONF = dict()
def error(string):
    """Print *string* to stdout with an ``[Error]`` prefix.

    Parameters:
    -----------
    -`string` the message content
    """
    print(f"[Error] {string}")
def info(string):
    """Print *string* to stdout with an ``[Info]`` prefix.

    Parameters:
    -----------
    -`string` the message content
    """
    print(f"[Info] {string}")
def validate_value(n):
    """Coerce user input *n* to a non-negative integer.

    Parameters:
    -----------
    -`n` the raw value typed by the user (string or int)

    Returns:
    --------
    The value as an ``int``.  On invalid input an error is printed and
    the program exits with status 1.
    """
    raw = n  # keep the original input so the error message shows it
    try:
        n = int(n)
    except (TypeError, ValueError):
        # TypeError added: int(None) previously crashed with an uncaught
        # exception instead of reporting a validation error.
        n = ""
    if n == "" or n < 0:
        # Fix: report the value the user actually typed; previously the
        # message interpolated the sanitized placeholder "" instead.
        error("input \"{}\" not valid.".format(raw))
        sys.exit(1)
    return n
def get_all_shifts(calendar):
    """
    Return a sorted list of every shift name occurring on any day.

    Parameters:
    -----------
    - `calendar` maps day -> {shift name -> assignment}
    """
    shift_names = set()
    for day_shifts in calendar.values():
        shift_names.update(day_shifts.keys())
    return sorted(shift_names)
def parse_config_file(configFile):
    """
    Extract variables from the config file given as input and store them
    in the module-level CONF dictionary.

    Parameters:
    -----------
    - `configFile` is the config file
    """
    # Config key -> CONF key.  Unknown keys are silently ignored,
    # exactly like the original if/elif chain's final `else: pass`.
    key_map = {
        "PROB_NAME": "name",
        "OPLRUN": "oplrun",
        "OUT_DIR": "out_dir",
        "MOD_DIR": "model_dir",
        "DATA_DIR": "data_dir",
        "MOD_PROB_1": "model_file",
        "DATA_PROB_1": "data_file",
        "OUT_PROB_1": "out_file",
        "MOD_PROB_2": "model_file_min_trips",
        "DATA_PROB_2": "data_file_min_trips",
        "OUT_PROB_2": "out_file_min_trips",
    }
    with open(configFile, 'r') as config:
        for line in config.readlines():
            if line[0] == '#':  # Skip commented lines
                continue
            parts = line.replace("\n", "").replace("\"", "").split("=")
            if len(parts) == 1:  # Skip empty lines
                continue
            if parts[0] in key_map:
                CONF[key_map[parts[0]]] = parts[1]
def ask_for_max_shifts_per_day():
    """
    Asks to user to specify the maximum number of shifts to assign to the same student in a day.

    Returns:
    --------
    -`maxShiftsXDay` is an integer (1 when the user just presses Enter)
    """
    answer = input(" $> Max number of shifts to assign to a student in the same day? (format: 'val', by default is 1)")
    if answer == "":
        return 1
    return validate_value(answer)
def ask_for_min_max_shifts(participants):
    """
    Asks the user to specify the minimum (and optionally maximum) number of
    shifts to assign to each participant.

    Returns a dict mapping participant -> (min, max), where either entry
    may be None when the user leaves it unspecified.
    """
    bounds = dict()
    for person in participants:
        raw = input(" $> Min and max number of shifts to assign to {} (format: 'min,max' or just 'min')? ".format(person)).split(',')
        if len(raw) >= 2:
            # Both bounds supplied.
            bounds[person] = (validate_value(raw[0]), validate_value(raw[1]))
        elif raw[0] == "":
            # Empty answer: no constraint at all.
            bounds[person] = (None, None)
        else:
            # Only the minimum supplied.
            bounds[person] = (validate_value(raw[0]), None)
    return bounds
def write_result_to_excel(result, output_file, problem_name):
    """
    Write the result in an Excel file.

    Parameters:
    -----------
    -`result` a dict which maps day->list, where:
        -`day` is the string identifier for a day
        -`list` is a dict which maps shift->student, where:
            -`shift` is the string identifier of shift
            -`student` is the name of the student assigned to `shift` in `day`
    -`output_file` is the filename of output file
    -`problem_name` is a string which names the problem, for printing purposes

    NOTE(review): if `result` is empty, `current_row` is never bound and the
    summary section below raises NameError — confirm callers never pass {}.
    """
    # Drawing parameters
    interline = 1
    offset_inc = 3
    inter_table_summary = 3
    columns = ["B", "C", "D", "E", "F"]
    i_columns = [ 1, 2, 3, 4, 5 ]
    days = ["Mon", "Tue", "Wed", "Thu", "Fri"]
    shifts = get_all_shifts(result)
    offset = 1  # current top row of the week table being drawn
    first_occurrence = True  # True until the first Monday of a week is seen
    # XlsxWriter creation and formats
    my_workbook = xlsxwriter.Workbook(output_file)
    my_worksheet = my_workbook.add_worksheet(problem_name)
    default_fmt = my_workbook.add_format()
    centered_fmt = my_workbook.add_format({'align':'center', 'valign':'vcenter'})
    bold_cnt_fmt = my_workbook.add_format({'bold':1, 'align':'center', 'valign':'vcenter'})
    # Preliminary setup
    max_lenght = 0  # Max lenght of student name, used to size the columns
    for d in result.keys():
        for t in result.get(d):
            lenght = len(result.get(d).get(t))
            max_lenght = max(max_lenght, lenght)
    # Set column width and merge first cells for title
    my_worksheet.set_column(0, 0, max_lenght, default_fmt)
    my_worksheet.set_column(i_columns[0], i_columns[-1], max_lenght, centered_fmt)
    my_worksheet.merge_range("A1:{}1".format(columns[-1]), "", centered_fmt)
    # Header
    my_worksheet.write("A1", "Automatic assignment for {}".format(problem_name), bold_cnt_fmt)
    offset = offset + 1
    for dd, cc in zip(days, columns):
        my_worksheet.write( "{}{}".format(cc, str(offset)), dd, bold_cnt_fmt)
    offset = offset + 1
    # Loop on lines: one table per week (Mon..Fri), one row per shift.
    students_stat = dict()  # Shifts-assigned counter for statistics
    for k, d in enumerate(result.keys()):
        # A Monday (after a Friday, or the very first day) starts a new
        # week table: write the date row and the shift-name column.
        if d.startswith("Mon") and first_occurrence:
            first_occurrence = False
            if k != 0:  # No increment on first week
                offset = offset + offset_inc + interline
            date = int(d.split(" ")[1])
            for cc in columns:
                if date > 31:  # don't write dates past the end of the month
                    break
                my_worksheet.write( "{}{}".format(cc, str(offset)), date, bold_cnt_fmt)
                date = date + 1
            for rr, t in enumerate(shifts):
                my_worksheet.write( "A{}".format(str(offset+rr+1)), t, bold_cnt_fmt)
        if d.startswith("Fri"):
            first_occurrence = True  # next Monday opens a new week table
        # Fill in the assigned student for every shift of this day.
        # NOTE(review): this inner loop reuses the name `k`, shadowing the
        # outer enumerate index — harmless here since `k` is only consumed
        # at the top of each outer iteration, but worth renaming.
        for k, t in enumerate(result.get(d)):
            student = result.get(d).get(t)
            if students_stat.get(student)==None: # Eventually initialize counter
                students_stat[student] = 0
            students_stat[student] = students_stat.get(student) + 1 # Increment counter
            current_col = columns[ days.index(d[0:3]) ]
            current_row = str(offset + shifts.index(t) + 1)
            my_worksheet.write( "{}{}".format(current_col, current_row), student )
    # Write summary: total shifts per student, below the last week table.
    offset = int(current_row) + inter_table_summary
    my_worksheet.write( "A{}".format(str(offset)), "Student", bold_cnt_fmt)
    my_worksheet.write( "B{}".format(str(offset)), "Nr. Shifts", bold_cnt_fmt)
    offset = offset + 1
    for i,s in enumerate(students_stat.keys()):
        current_row = str(offset + i)
        current_col = "A"
        my_worksheet.write( "{}{}".format(current_col, current_row), s)
        current_col = "B"
        my_worksheet.write( "{}{}".format(current_col, current_row), students_stat.get(s))
    my_workbook.close()
def run_all_process(problem_name, model_filepath, data_filepath, output_filepath, offline, opl_exe_path, parser):
    """
    Run the entire process: Doodle parsing, run the solver and output writing.

    Parameters:
    -----------
    -`problem_name` is the string identifier for the current problem
    -`model_filepath` is the path to the mod file
    -`data_filepath` is the path to the dat file
    -`output_filepath` is the path to the output file (create it or overwrite)
    -`offline` is a boolean flag to enable the new data creation or use the existing one
    -`opl_exe_path` is the path to the OPL executable
    -`parser` is the DoodleParser object which collects info on participants, calendar, ...
    """
    # Preconditions; `offline` and `parser` must be consistent with each
    # other (offline implies no parser, online implies a parser).
    assert(problem_name), "Problem name is not defined"
    assert(model_filepath), "Model file not defined"
    assert(data_filepath), "Data file not defined"
    assert(output_filepath), "Output file not defined"
    assert(opl_exe_path), "OPL exe not defined"
    assert(not(offline) or parser==None), "Offline/Parser inconsistency" # if offline then parser==None
    assert(offline or parser!=None), "Offline/Parser inconsistency" # if not(offline) then parser!=None
    info("Initial configuration...\tDONE")
    if not(offline) and parser!=None:
        # Ask to the user to specify the min, max number of shifts for each participant
        numMinMaxShifts = ask_for_min_max_shifts(parser.get_participants())
        numMaxShiftsPerDay = ask_for_max_shifts_per_day()
    # Create the solver
    solver = Solver(problem_name)
    # Configure the solver
    solver.set_opl_exe(opl_exe_path)
    solver.set_model(model_filepath)
    solver.set_data(data_filepath)
    solver.set_output_file(output_filepath)
    if not(offline) and parser!=None:
        # Configure the problem and set data for participants, options, preferences and shifts
        solver.config_problem(parser.get_participants(),
                              parser.get_options(),
                              parser.get_calendar(),
                              numMinMaxShifts, numMaxShiftsPerDay)
    info("Configure Solver...\tDONE\n")
    info("Run the solver!\n")
    # Take init solve time
    ts0 = time.time()
    # Run the solver
    opt_val, result = solver.solve()
    # Take final solve time
    tsf = time.time()
    if opt_val==None or result == "": # Something goes wrong in solving
        error("The problem has no solution.\n")
    else:
        info("Objective function: {}".format(opt_val))
        info("Write Excel result in {}...\n".format(output_filepath))
        # Save result.
        # NOTE(review): this passes CONF["name"] rather than the
        # `problem_name` parameter — identical in the current callers, but
        # confirm this is intended rather than an oversight.
        write_result_to_excel(result, output_filepath, CONF["name"])
    # Print statistic info about elapsed time
    info("Solver spent \t{0:.{digits}f} seconds.".format((tsf-ts0), digits=3))
if __name__=="__main__":
    # Default parameters: solve both problems unless --problem narrows it.
    execProblem1 = True
    execProblem2 = True
    offline = False
    # Retrieve input arguments
    argParser = argparse.ArgumentParser()
    argParser.add_argument("pollID", help="poll identifier, take it from the Doodle link")
    argParser.add_argument("--offline", help="no access to Doodle, use the existing dat file", action="store_true")
    argParser.add_argument("--problem", help="select the problem you want to solve", type=int)
    args = argParser.parse_args()
    if args.offline:
        offline = True
    if args.problem == 1:
        execProblem1 = True
        execProblem2 = False
    elif args.problem == 2:
        execProblem1 = False
        execProblem2 = True
    # Take init time, for statistics purposes
    t0 = time.time()
    # BUG FIX: previously this read sys.argv[1], which is wrong whenever an
    # option such as --offline precedes the positional poll identifier.
    # Use the value argparse has already parsed and validated.
    pollID = args.pollID
    parse_config_file(CONFIG_FILE)
    # Retrieve global information from config file
    problem_name = CONF["name"]
    opl_exe_path = CONF["oplrun"]
    # Doodle Parsing
    if not offline:
        # Parse the doodle survey
        parser = DoodleParser(pollID)
        info("Parsing Doodle...\tDONE")
    else:
        parser = None
    # PROBLEM 1 : Balanced distribution
    if execProblem1:
        output_filepath = os.path.join(CONF["out_dir"], CONF["out_file"])
        model_filepath = os.path.join(CONF["model_dir"], CONF["model_file"])
        data_filepath = os.path.join(CONF["data_dir"], CONF["data_file"])
        # Start the solving of PROBLEM 1
        run_all_process(problem_name, model_filepath, data_filepath, output_filepath, offline, opl_exe_path, parser)
    # PROBLEM 2 : Minimize trips
    if execProblem2:
        output_filepath = os.path.join(CONF["out_dir"], CONF["out_file_min_trips"])
        model_filepath = os.path.join(CONF["model_dir"], CONF["model_file_min_trips"])
        data_filepath = os.path.join(CONF["data_dir"], CONF["data_file_min_trips"])
        # Start the solving of PROBLEM 2
        run_all_process(problem_name, model_filepath, data_filepath, output_filepath, offline, opl_exe_path, parser)
    tf = time.time()
    info("Program ends in \t{0:.{digits}f} seconds.".format((tf-t0), digits=3))
| StarcoderdataPython |
def how_many_different_numbers(numbers: list) -> int:
    """Count how many distinct values appear in *numbers*."""
    unique_values = set(numbers)
    return len(unique_values)
def main():
    """Demo entry point: count distinct values in a sample list and print it."""
    sample = [1, 2, 3, 1, 2, 3, 4, 1]
    distinct_count = how_many_different_numbers(sample)
    print(distinct_count)

if __name__ == '__main__':
    main()
| StarcoderdataPython |
1651240 | <filename>experiments/parse_args.py<gh_stars>0
import argparse
def parse_args():
    """Define and evaluate the experiment command-line interface.

    Returns the populated ``argparse.Namespace``.
    """
    cli = argparse.ArgumentParser("Reinforcement Learning experiments for multiagent environments")
    add = cli.add_argument  # shorthand; every option below goes through it
    # --- Environment ---
    add("--scenario", type=str, default="simple_tag", help="name of the scenario script")
    add("--max-episode-len", type=int, default=150, help="maximum episode length")
    add("--num-episodes", type=int, default=50000, help="number of episodes")
    add("--exp-name", type=str, default="st-borders-50k-0", help="name of the experiment")
    add("--num-adversaries", type=int, default=0, help="number of adversaries")
    add("--good-policy", type=str, default="maddpg", help="policy for good agents")
    add("--adv-policy", type=str, default="maddpg", help="policy of adversaries")
    # --- Core training parameters ---
    add("--lr", type=float, default=1e-2, help="learning rate for Adam optimizer")
    add("--gamma", type=float, default=0.98, help="discount factor")
    add("--batch-size", type=int, default=1024, help="number of episodes to optimize at the same time")
    add("--num-units", type=int, default=64, help="number of units in the mlp")
    # --- Checkpointing ---
    add("--save-dir", type=str, default="./out/", help="directory in which training state and model should be saved")
    add("--save-rate", type=int, default=1000, help="save model once every time this many episodes are completed")
    add("--load-dir", type=str, default="", help="directory in which training state and model are loaded")
    # --- Evaluation ---
    add("--display", action="store_true", default=False)
    add("--plots-dir", type=str, default="./out/learning_curves/", help="directory where plot data is saved")
    return cli.parse_args()
| StarcoderdataPython |
4811416 | import sys
import math
import random
class leds:
    """Controller for the device's LED rings, speaking a small binary
    command protocol over the BLE transport exposed by ``call``.

    ``call`` must provide ``blewrite(data)`` (send one command packet) and
    ``blewait()`` (block until the write completes).
    """

    # Protocol code for each ring selector.  Packets fall back to the
    # "left" code (0x01) for an unrecognised selector, matching the
    # original behaviour where the packet was pre-filled with 0x01.
    _SIDE_CODES = {"left": 0x01, "right": 0x02, "all": 0x03}

    def __init__(self, call):
        self.call = call

    def _send(self, data):
        """Write one command packet and wait for the transfer to finish."""
        self.call.blewrite(data)
        self.call.blewait()

    def show_next1(self, color, index):
        # NOTE(review): `color` and `index` are accepted but never used —
        # the packet is constant.  Kept as-is to preserve behaviour.
        self._send([0x22, 0x01])

    def show_single(self, leftright, r, g, b):
        """Light the selected ring ("left"/"right"/"all") with an RGB triple."""
        self._send([0x17, self._SIDE_CODES.get(leftright, 0x01), r, g, b])

    def show_all(self, r, g, b):
        """Light both rings with an RGB triple."""
        self._send([0x17, 0x03, r, g, b])

    def color(self, value):
        """Convert between an RGB tuple and a '#rrggbb' hex string.

        A tuple is rendered to a lowercase '#…' hex string; a string is
        parsed back into an ``[r, g, b]`` list.  Other types return None
        (as before).
        """
        if isinstance(value, tuple):
            return '#' + ''.join('{:02x}'.format(component) for component in value)
        elif isinstance(value, str):
            return [int(value[i:i + 2], 16) for i in (1, 3, 5)]

    def trun_ring(self, buf, col):
        """Append the three RGB bytes of hex colour `col` to packet `buf`."""
        buf.extend(self.color(col))
        return buf

    def show_all_hex(self, color):
        """Light both rings with a '#rrggbb' colour string."""
        self._send(self.trun_ring([0x17, 0x03], color))

    def show_single_hex(self, index, color):
        """Light the selected ring with a '#rrggbb' colour string."""
        self._send(self.trun_ring([0x17, self._SIDE_CODES.get(index, 0x01)], color))

    def clear(self):
        """Turn every LED off (write black to both rings)."""
        self._send([0x17, 0x03, 0x00, 0x00, 0x00])
| StarcoderdataPython |
1757140 | import pygame
def Write(fnt="Comic sans MS", fontsize=24, text="Namastey!", color=(255, 255, 255), background=None, screen=None, x=0, y=0, center=False):
    """
    This funtion will write the text on the screen.
    It takes as arguments:
        font
        fontsize
        text
        color
        background
        screen
        x
        y
        center -> This is check is the font is to be centered

    NOTE(review): the `fnt` argument is currently ignored — the font is
    hard-coded to "Utils/fonts/Roboto-Bold.ttf" below.  Confirm whether
    `fnt` should be honoured (pygame.font.Font expects a file path, so
    the default "Comic sans MS" could not be passed through unchanged).
    """
    f = pygame.font.Font("Utils/fonts/Roboto-Bold.ttf", fontsize)
    # Anti-aliased render; `background` None means transparent background.
    t = f.render(text, True, color, background)
    textRect = t.get_rect()
    if center : textRect.center = (x, y)
    screen.blit(t, textRect)
32359 | from abc import ABCMeta, abstractmethod
from functools import partial
from typing import Tuple, Union
import numexpr
import numpy as np
from scipy import sparse, special
from tabmat import MatrixBase, StandardizedMatrix
from ._functions import (
binomial_logit_eta_mu_deviance,
binomial_logit_rowwise_gradient_hessian,
gamma_deviance,
gamma_log_eta_mu_deviance,
gamma_log_likelihood,
gamma_log_rowwise_gradient_hessian,
normal_deviance,
normal_identity_eta_mu_deviance,
normal_identity_rowwise_gradient_hessian,
normal_log_likelihood,
poisson_deviance,
poisson_log_eta_mu_deviance,
poisson_log_likelihood,
poisson_log_rowwise_gradient_hessian,
tweedie_deviance,
tweedie_log_eta_mu_deviance,
tweedie_log_likelihood,
tweedie_log_rowwise_gradient_hessian,
)
from ._link import IdentityLink, Link, LogitLink, LogLink
from ._util import _safe_lin_pred, _safe_sandwich_dot
class ExponentialDispersionModel(metaclass=ABCMeta):
    r"""Base class for reproductive Exponential Dispersion Models (EDM).

    The PDF of :math:`Y \sim \mathrm{EDM}(\mu, \phi)` is given by

    .. math::

        p(y \mid \theta, \phi)
        &= c(y, \phi) \exp\left(\frac{\theta y - A(\theta)}{\phi}\right) \\
        &= \tilde{c}(y, \phi) \exp\left(-\frac{d(y, \mu)}{2\phi}\right)

    with mean :math:`\mathrm{E}(Y) = A'(\theta) = \mu`, variance
    :math:`\mathrm{var}(Y) = \phi \cdot v(\mu)`, unit variance
    :math:`v(\mu)` and unit deviance :math:`d(y, \mu)`.

    Properties
    ----------
    lower_bound
    upper_bound
    include_lower_bound
    include_upper_bound

    Methods
    -------
    in_y_range
    unit_variance
    unit_variance_derivative
    variance
    variance_derivative
    unit_deviance
    unit_deviance_derivative
    deviance
    deviance_derivative
    starting_mu
    _mu_deviance_derivative
    eta_mu_deviance
    gradient_hessian

    References
    ----------
    https://en.wikipedia.org/wiki/Exponential_dispersion_model.
    """

    # NOTE(review): `get_one_over_variance` and `_as_float_arrays` are used
    # below but are not among the imports visible in this file chunk —
    # presumably imported from ._link/._util elsewhere; confirm.

    @property
    @abstractmethod
    def lower_bound(self) -> float:
        """Get the lower bound of values for the EDM."""
        pass

    @property
    @abstractmethod
    def upper_bound(self) -> float:
        """Get the upper bound of values for the EDM."""
        pass

    @property
    def include_lower_bound(self) -> bool:
        """Return whether ``lower_bound`` is allowed as a value of ``y``."""
        # NOTE(review): the body is `pass`, so this returns None (falsy)
        # unless overridden by a subclass; presumably this was meant to be
        # abstract like lower_bound/upper_bound — confirm.
        pass

    @property
    def include_upper_bound(self) -> bool:
        """Return whether ``upper_bound`` is allowed as a value of ``y``."""
        # NOTE(review): same caveat as include_lower_bound.
        pass

    def in_y_range(self, x) -> np.ndarray:
        """Return ``True`` if ``x`` is in the valid range of the EDM.

        Parameters
        ----------
        x : array-like, shape (n_samples,)
            Target values.

        Returns
        -------
        np.ndarray
        """
        # Pick strict vs. non-strict comparison for each bound depending on
        # whether the bound itself is an allowed value.
        if self.include_lower_bound:
            if self.include_upper_bound:
                return np.logical_and(
                    np.greater_equal(x, self.lower_bound),
                    np.less_equal(x, self.upper_bound),
                )
            else:
                return np.logical_and(
                    np.greater_equal(x, self.lower_bound), np.less(x, self.upper_bound)
                )
        else:
            if self.include_upper_bound:
                return np.logical_and(
                    np.greater(x, self.lower_bound), np.less_equal(x, self.upper_bound)
                )
            else:
                return np.logical_and(
                    np.greater(x, self.lower_bound), np.less(x, self.upper_bound)
                )

    @abstractmethod
    def unit_variance(self, mu):
        r"""Compute the unit variance function.

        The unit variance :math:`v(\mu)` determines the variance as a function
        of the mean :math:`\mu` by
        :math:`\mathrm{var}(y_i) = (\phi / s_i) \times v(\mu_i)`. It can
        also be derived from the unit deviance :math:`d(y, \mu)` as

        .. math::

            v(\mu) = \frac{2}{\frac{\partial^2 d(y, \mu)}{\partial\mu^2}}\big|_{y=\mu}.

        See also :func:`variance`.

        Parameters
        ----------
        mu : array-like, shape (n_samples,)
            Predicted mean.
        """
        pass

    @abstractmethod
    def unit_variance_derivative(self, mu):
        r"""Compute the derivative of the unit variance with respect to ``mu``.

        Return :math:`v'(\mu)`.

        Parameters
        ----------
        mu : array-like, shape (n_samples,)
            Predicted mean.
        """
        pass

    def variance(self, mu: np.ndarray, dispersion=1, sample_weight=1) -> np.ndarray:
        r"""Compute the variance function.

        The variance of :math:`Y_i \sim \mathrm{EDM}(\mu_i, \phi / s_i)` is
        :math:`\mathrm{var}(Y_i) = (\phi / s_i) * v(\mu_i)`, with unit variance
        :math:`v(\mu)` and weights :math:`s_i`.

        Parameters
        ----------
        mu : array-like, shape (n_samples,)
            Predicted mean.
        dispersion : float, optional (default=1)
            Dispersion parameter :math:`\phi`.
        sample_weight : array-like, shape (n_samples,), optional (default=1)
            Weights or exposure to which variance is inverse proportional.

        Returns
        -------
        array-like, shape (n_samples,)
        """
        return self.unit_variance(mu) * dispersion / sample_weight

    def variance_derivative(self, mu, dispersion=1, sample_weight=1):
        r"""Compute the derivative of the variance with respect to ``mu``.

        The derivative of the variance is equal to
        :math:`(\phi / s_i) * v'(\mu_i)`, where :math:`v(\mu)` is the unit
        variance and :math:`s_i` are weights.

        Parameters
        ----------
        mu : array-like, shape (n_samples,)
            Predicted mean.
        dispersion : float, optional (default=1)
            Dispersion parameter :math:`\phi`.
        sample_weight : array-like, shape (n_samples,), optional (default=1)
            Weights or exposure to which variance is inverse proportional.

        Returns
        -------
        array-like, shape (n_samples,)
        """
        return self.unit_variance_derivative(mu) * dispersion / sample_weight

    @abstractmethod
    def unit_deviance(self, y, mu):
        r"""Compute the unit deviance.

        In terms of the log likelihood :math:`L`, the unit deviance is
        :math:`-2\phi\times [L(y, \mu, \phi) - L(y, y, \phi)].`

        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        """
        pass

    def unit_deviance_derivative(self, y, mu):
        r"""Compute the derivative of the unit deviance with respect to ``mu``.

        The derivative of the unit deviance is given by
        :math:`-2 \times (y - \mu) / v(\mu)`, where :math:`v(\mu)` is the unit
        variance.

        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.

        Returns
        -------
        array-like, shape (n_samples,)
        """
        return -2 * (y - mu) / self.unit_variance(mu)

    def deviance(self, y, mu, sample_weight=1):
        r"""Compute the deviance.

        The deviance is a weighted sum of the unit deviances,
        :math:`\sum_i s_i \times d(y_i, \mu_i)`, where :math:`d(y, \mu)` is the
        unit deviance and :math:`s` are weights. In terms of the log likelihood,
        it is :math:`-2\phi \times [L(y, \mu, \phi / s) - L(y, y, \phi / s)]`.

        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,), optional (default=1)
            Weights or exposure to which variance is inversely proportional.

        Returns
        -------
        float
        """
        if sample_weight is None:
            return np.sum(self.unit_deviance(y, mu))
        else:
            return np.sum(self.unit_deviance(y, mu) * sample_weight)

    def deviance_derivative(self, y, mu, sample_weight=1):
        r"""Compute the derivative of the deviance with respect to ``mu``.

        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,) (default=1)
            Weights or exposure to which variance is inverse proportional.

        Returns
        -------
        array-like, shape (n_samples,)
        """
        return sample_weight * self.unit_deviance_derivative(y, mu)

    def _mu_deviance_derivative(
        self,
        coef: np.ndarray,
        X,
        y: np.ndarray,
        sample_weight: np.ndarray,
        link: Link,
        offset: np.ndarray = None,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Compute ``mu`` and the derivative of the deviance \
            with respect to coefficients."""
        lin_pred = _safe_lin_pred(X, coef, offset)
        mu = link.inverse(lin_pred)
        d1 = link.inverse_derivative(lin_pred)
        temp = d1 * self.deviance_derivative(y, mu, sample_weight)
        # A coef one longer than X has columns carries an intercept in
        # position 0; its gradient entry is the plain sum of `temp`.
        if coef.size == X.shape[1] + 1:
            devp = np.concatenate(([temp.sum()], temp @ X))
        else:
            devp = temp @ X  # same as X.T @ temp
        return mu, devp

    def eta_mu_deviance(
        self,
        link: Link,
        factor: float,
        cur_eta: np.ndarray,
        X_dot_d: np.ndarray,
        y: np.ndarray,
        sample_weight: np.ndarray,
    ):
        """
        Compute ``eta``, ``mu`` and the deviance.

        Compute:
        * the linear predictor, ``eta``, as ``cur_eta + factor * X_dot_d``;
        * the link-function-transformed prediction, ``mu``;
        * the deviance.

        Returns
        -------
        numpy.ndarray, shape (X.shape[0],)
            The linear predictor, ``eta``.
        numpy.ndarray, shape (X.shape[0],)
            The link-function-transformed prediction, ``mu``.
        float
            The deviance.
        """
        # eta_out and mu_out are filled inside self._eta_mu_deviance,
        # avoiding allocating new arrays for every line search loop
        eta_out = np.empty_like(cur_eta)
        mu_out = np.empty_like(cur_eta)
        deviance = self._eta_mu_deviance(
            link, factor, cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out
        )
        return eta_out, mu_out, deviance

    def _eta_mu_deviance(
        self,
        link: Link,
        factor: float,
        cur_eta: np.ndarray,
        X_dot_d: np.ndarray,
        y: np.ndarray,
        sample_weight: np.ndarray,
        eta_out: np.ndarray,
        mu_out: np.ndarray,
    ):
        """
        Update ``eta`` and ``mu`` and compute the deviance.

        This is a default implementation that should work for all valid
        distributions and link functions. To implement a custom optimized
        version for a specific distribution and link function, please override
        this function in the subclass.

        Returns
        -------
        float
        """
        # In-place writes into the caller-provided output buffers.
        eta_out[:] = cur_eta + factor * X_dot_d
        mu_out[:] = link.inverse(eta_out)
        return self.deviance(y, mu_out, sample_weight=sample_weight)

    def rowwise_gradient_hessian(
        self,
        link: Link,
        coef: np.ndarray,
        dispersion,
        X: Union[MatrixBase, StandardizedMatrix],
        y: np.ndarray,
        sample_weight: np.ndarray,
        eta: np.ndarray,
        mu: np.ndarray,
        offset: np.ndarray = None,
    ):
        """
        Compute the gradient and negative Hessian of the log likelihood row-wise.

        Note: ``coef``, ``dispersion``, ``X`` and ``offset`` are accepted for
        interface uniformity but are not used in this default implementation.

        Returns
        -------
        numpy.ndarray, shape (X.shape[0],)
            The gradient of the log likelihood, row-wise.
        numpy.ndarray, shape (X.shape[0],)
            The negative Hessian of the log likelihood, row-wise.
        """
        gradient_rows = np.empty_like(mu)
        hessian_rows = np.empty_like(mu)
        self._rowwise_gradient_hessian(
            link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
        )
        # To form the full Hessian matrix from the IRLS sample_weight:
        # hessian_matrix = _safe_sandwich_dot(X, hessian_rows, intercept=intercept)
        return gradient_rows, hessian_rows

    def _rowwise_gradient_hessian(
        self, link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
    ):
        """
        Update ``gradient_rows`` and ``hessian_rows`` in place.

        This is a default implementation that should work for all valid
        distributions and link functions. To implement a custom optimized
        version for a specific distribution and link function, please override
        this function in the subclass.
        """
        # FOR TWEEDIE: sigma_inv = weights / (mu ** p) during optimization bc phi = 1
        sigma_inv = get_one_over_variance(self, link, mu, eta, 1.0, sample_weight)
        d1 = link.inverse_derivative(eta)  # = h'(eta)
        # Alternatively:
        # h'(eta) = h'(g(mu)) = 1/g'(mu), note that h is inverse of g
        # d1 = 1./link.derivative(mu)
        d1_sigma_inv = d1 * sigma_inv
        gradient_rows[:] = d1_sigma_inv * (y - mu)
        hessian_rows[:] = d1 * d1_sigma_inv

    def _fisher_information(
        self, link, X, y, mu, sample_weight, dispersion, fit_intercept
    ):
        """Compute the expected information matrix.

        Parameters
        ----------
        link : Link
            A link function (i.e. an instance of :class:`~glum._link.Link`).
        X : array-like
            Training data.
        y : array-like
            Target values.
        mu : array-like
            Predicted mean.
        sample_weight : array-like
            Weights or exposure to which variance is inversely proportional.
        dispersion : float
            The dispersion parameter.
        fit_intercept : bool
            Whether the model has an intercept.
        """
        W = (link.inverse_derivative(link.link(mu)) ** 2) * get_one_over_variance(
            self, link, mu, link.inverse(mu), dispersion, sample_weight
        )
        return _safe_sandwich_dot(X, W, intercept=fit_intercept)

    def _observed_information(
        self, link, X, y, mu, sample_weight, dispersion, fit_intercept
    ):
        """Compute the observed information matrix.

        Parameters
        ----------
        X : array-like
            Training data.
        y : array-like
            Target values.
        mu : array-like
            Predicted mean.
        sample_weight : array-like
            Weights or exposure to which variance is inversely proportional.
        dispersion : float
            The dispersion parameter.
        fit_intercept : bool
            Whether the model has an intercept.
        """
        linpred = link.link(mu)
        W = (
            -link.inverse_derivative2(linpred) * (y - mu)
            + (link.inverse_derivative(linpred) ** 2)
            * (
                1
                + (y - mu) * self.unit_variance_derivative(mu) / self.unit_variance(mu)
            )
        ) * get_one_over_variance(self, link, mu, linpred, dispersion, sample_weight)
        return _safe_sandwich_dot(X, W, intercept=fit_intercept)

    def _score_matrix(self, link, X, y, mu, sample_weight, dispersion, fit_intercept):
        """Compute the score.

        Parameters
        ----------
        X : array-like
            Training data.
        y : array-like
            Target values.
        mu : array-like
            Predicted mean.
        sample_weight : array-like
            Weights or exposure to which variance is inversely proportional.
        dispersion : float
            The dispersion parameter.
        fit_intercept : bool
            Whether the model has an intercept.
        """
        linpred = link.link(mu)
        W = (
            get_one_over_variance(self, link, mu, linpred, dispersion, sample_weight)
            * link.inverse_derivative(linpred)
            * (y - mu)
        ).reshape(-1, 1)
        # With an intercept, prepend the weight column itself (the intercept's
        # "feature" is a column of ones); branch on sparse vs dense X since
        # the element-wise product API differs.
        if fit_intercept:
            if sparse.issparse(X):
                return sparse.hstack((W, X.multiply(W)))
            else:
                return np.hstack((W, np.multiply(X, W)))
        else:
            if sparse.issparse(X):
                return X.multiply(W)
            else:
                return np.multiply(X, W)

    def dispersion(self, y, mu, sample_weight=None, ddof=1, method="pearson") -> float:
        r"""Estimate the dispersion parameter :math:`\phi`.

        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,), optional (default=1)
            Weights or exposure to which variance is inversely proportional.
        ddof : int, optional (default=1)
            Degrees of freedom consumed by the model for ``mu``.
        method = {'pearson', 'deviance'}, optional (default='pearson')
            Whether to base the estimate on the Pearson residuals or the deviance.

        Returns
        -------
        float
        """
        y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
        if method == "pearson":
            pearson_residuals = ((y - mu) ** 2) / self.unit_variance(mu)
            if sample_weight is None:
                numerator = pearson_residuals.sum()
            else:
                numerator = np.dot(pearson_residuals, sample_weight)
        elif method == "deviance":
            numerator = self.deviance(y, mu, sample_weight)
        else:
            raise NotImplementedError(f"Method {method} hasn't been implemented.")
        # Denominator: residual degrees of freedom (weighted when weights given).
        if sample_weight is None:
            return numerator / (len(y) - ddof)
        else:
            return numerator / (sample_weight.sum() - ddof)
class TweedieDistribution(ExponentialDispersionModel):
    r"""A class for the Tweedie distribution.

    A Tweedie distribution with mean :math:`\mu = \mathrm{E}(Y)` is uniquely
    defined by its mean-variance relationship
    :math:`\mathrm{var}(Y) \propto \mu^{\mathrm{power}}`.

    Special cases are:

    ====== ================
    Power  Distribution
    ====== ================
    0      Normal
    1      Poisson
    (1, 2) Compound Poisson
    2      Gamma
    3      Inverse Gaussian
    ====== ================

    Parameters
    ----------
    power : float, optional (default=0)
        The variance power of the `unit_variance`
        :math:`v(\mu) = \mu^{\mathrm{power}}`. For
        :math:`0 < \mathrm{power} < 1`, no distribution exists.
    """

    # ``np.Inf`` was removed in NumPy 2.0; use the lowercase ``np.inf``.
    upper_bound = np.inf
    include_upper_bound = False

    def __init__(self, power=0):
        # validate power and set _upper_bound, _include_upper_bound attrs
        self.power = power

    @property
    def lower_bound(self) -> Union[float, int]:
        """Return the lowest value of ``y`` allowed."""
        if self.power <= 0:
            return -np.inf
        if self.power >= 1:
            return 0
        # Unreachable: the ``power`` setter rejects 0 < power < 1.
        raise ValueError

    @property
    def include_lower_bound(self) -> bool:
        """Return whether ``lower_bound`` is allowed as a value of ``y``."""
        if self.power <= 0:
            return False
        if (self.power >= 1) and (self.power < 2):
            return True
        if self.power >= 2:
            return False
        # Unreachable: the ``power`` setter rejects 0 < power < 1.
        raise ValueError

    @property
    def power(self) -> float:
        """Return the Tweedie power parameter."""
        return self._power

    @power.setter
    def power(self, power):
        if not isinstance(power, (int, float)):
            raise TypeError(f"power must be an int or float, input was {power}")
        if (power > 0) and (power < 1):
            raise ValueError("For 0<power<1, no distribution exists.")
        # Prevents upcasting when working with 32-bit data
        self._power = power if isinstance(power, int) else np.float32(power)

    def unit_variance(self, mu: np.ndarray) -> np.ndarray:
        """Compute the unit variance of a Tweedie distribution ``v(mu) = mu^power``.

        Parameters
        ----------
        mu : array-like, shape (n_samples,)
            Predicted mean.

        Returns
        -------
        numpy.ndarray, shape (n_samples,)
        """
        p = self.power  # noqa: F841
        return numexpr.evaluate("mu ** p")

    def unit_variance_derivative(self, mu: np.ndarray) -> np.ndarray:
        r"""Compute the derivative of the unit variance of a Tweedie distribution.

        Equation: :math:`v(\mu) = p \times \mu^{(p-1)}`.

        Parameters
        ----------
        mu : array-like, shape (n_samples,)
            Predicted mean.

        Returns
        -------
        numpy.ndarray, shape (n_samples,)
        """
        p = self.power  # noqa: F841
        return numexpr.evaluate("p * mu ** (p - 1)")

    def deviance(self, y, mu, sample_weight=None) -> float:
        """Compute the deviance.

        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,), optional (default=1)
            Sample weights.
        """
        p = self.power
        y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
        sample_weight = np.ones_like(y) if sample_weight is None else sample_weight
        # NOTE: the dispersion parameter is only necessary to convey
        # type information on account of a bug in Cython
        if p == 0:
            return normal_deviance(y, sample_weight, mu, dispersion=1.0)
        elif p == 1:
            return poisson_deviance(y, sample_weight, mu, dispersion=1.0)
        elif p == 2:
            return gamma_deviance(y, sample_weight, mu, dispersion=1.0)
        else:
            return tweedie_deviance(y, sample_weight, mu, p=float(p))

    def unit_deviance(self, y, mu):
        """Get the deviance of each observation."""
        p = self.power
        if p == 0:  # Normal distribution
            return (y - mu) ** 2
        elif p == 1:  # Poisson distribution
            return 2 * (special.xlogy(y, y / mu) - y + mu)
        elif p == 2:  # Gamma distribution
            return 2 * (np.log(mu / y) + y / mu - 1)
        else:
            # General Tweedie case; np.maximum guards y ** (2 - p) for y == 0
            # when 1 < p < 2.
            mu1mp = mu ** (1 - p)
            return 2 * (
                (np.maximum(y, 0) ** (2 - p)) / ((1 - p) * (2 - p))
                - y * mu1mp / (1 - p)
                + mu * mu1mp / (2 - p)
            )

    def _rowwise_gradient_hessian(
        self, link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
    ):
        # Specialized fast paths for the common power/link combinations;
        # otherwise fall back to the generic superclass implementation.
        f = None
        if self.power == 0 and isinstance(link, IdentityLink):
            f = normal_identity_rowwise_gradient_hessian
        elif self.power == 1 and isinstance(link, LogLink):
            f = poisson_log_rowwise_gradient_hessian
        elif self.power == 2 and isinstance(link, LogLink):
            f = gamma_log_rowwise_gradient_hessian
        elif 1 < self.power < 2 and isinstance(link, LogLink):
            f = partial(tweedie_log_rowwise_gradient_hessian, p=self.power)
        if f is not None:
            return f(y, sample_weight, eta, mu, gradient_rows, hessian_rows)
        return super()._rowwise_gradient_hessian(
            link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
        )

    def _eta_mu_deviance(
        self,
        link: Link,
        factor: float,
        cur_eta: np.ndarray,
        X_dot_d: np.ndarray,
        y: np.ndarray,
        sample_weight: np.ndarray,
        eta_out: np.ndarray,
        mu_out: np.ndarray,
    ):
        # Specialized fast paths for the common power/link combinations;
        # otherwise fall back to the generic superclass implementation.
        f = None
        if self.power == 0 and isinstance(link, IdentityLink):
            f = normal_identity_eta_mu_deviance
        elif self.power == 1 and isinstance(link, LogLink):
            f = poisson_log_eta_mu_deviance
        elif self.power == 2 and isinstance(link, LogLink):
            f = gamma_log_eta_mu_deviance
        elif 1 < self.power < 2 and isinstance(link, LogLink):
            f = partial(tweedie_log_eta_mu_deviance, p=self.power)
        if f is not None:
            return f(cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out, factor)
        return super()._eta_mu_deviance(
            link, factor, cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out
        )

    def log_likelihood(self, y, mu, sample_weight=None, dispersion=None) -> float:
        r"""Compute the log likelihood.

        For ``1 < power < 2``, we use the series approximation by Dunn and Smyth
        (2005) to compute the normalization term.

        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,), optional (default=1)
            Sample weights.
        dispersion : float, optional (default=None)
            Dispersion parameter :math:`\phi`. Estimated if ``None``.
        """
        p = self.power
        y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
        sample_weight = np.ones_like(y) if sample_weight is None else sample_weight
        # For Poisson (p == 1) the likelihood does not depend on the dispersion.
        if (p != 1) and (dispersion is None):
            dispersion = self.dispersion(y, mu, sample_weight)
        if p == 0:
            return normal_log_likelihood(y, sample_weight, mu, float(dispersion))
        elif p == 1:
            # NOTE: the dispersion parameter is only necessary to convey
            # type information on account of a bug in Cython
            return poisson_log_likelihood(y, sample_weight, mu, 1.0)
        elif p == 2:
            return gamma_log_likelihood(y, sample_weight, mu, float(dispersion))
        elif p < 2:
            return tweedie_log_likelihood(
                y, sample_weight, mu, float(p), float(dispersion)
            )
        else:
            # p > 2 (e.g. Inverse Gaussian) is not implemented.
            raise NotImplementedError

    def dispersion(self, y, mu, sample_weight=None, ddof=1, method="pearson") -> float:
        r"""Estimate the dispersion parameter :math:`\phi`.

        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights or exposure to which variance is inversely proportional.
        ddof : int, optional (default=1)
            Degrees of freedom consumed by the model for ``mu``.
        method = {'pearson', 'deviance'}, optional (default='pearson')
            Whether to base the estimate on the Pearson residuals or the deviance.

        Returns
        -------
        float
        """
        p = self.power  # noqa: F841
        y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
        if method == "pearson":
            # numexpr evaluates the Pearson residuals in a single pass.
            formula = "((y - mu) ** 2) / (mu ** p)"
            if sample_weight is None:
                return numexpr.evaluate(formula).sum() / (len(y) - ddof)
            else:
                formula = f"sample_weight * {formula}"
                return numexpr.evaluate(formula).sum() / (sample_weight.sum() - ddof)
        return super().dispersion(
            y, mu, sample_weight=sample_weight, ddof=ddof, method=method
        )
class NormalDistribution(TweedieDistribution):
    """The Normal (Gaussian) distribution: the Tweedie family with power 0."""

    def __init__(self):
        # Power 0 means the variance does not depend on the mean.
        super().__init__(power=0)
class PoissonDistribution(TweedieDistribution):
    """The (scaled) Poisson distribution: the Tweedie family with power 1."""

    def __init__(self):
        # Power 1 means the variance is proportional to the mean.
        super().__init__(power=1)
class GammaDistribution(TweedieDistribution):
    """The Gamma distribution: the Tweedie family with power 2."""

    def __init__(self):
        # Power 2 means the variance is proportional to the squared mean.
        super().__init__(power=2)
class InverseGaussianDistribution(TweedieDistribution):
    """The scaled Inverse Gaussian distribution: the Tweedie family with power 3."""

    def __init__(self):
        # Power 3 means the variance is proportional to the cubed mean.
        super().__init__(power=3)
class GeneralizedHyperbolicSecant(ExponentialDispersionModel):
    """A class for the Generalized Hyperbolic Secant (GHS) distribution.

    The GHS distribution is for targets ``y`` in ``(-∞, +∞)``.
    """

    # ``np.Inf`` was removed in NumPy 2.0; use the lowercase ``np.inf``.
    lower_bound = -np.inf
    upper_bound = np.inf
    include_lower_bound = False
    include_upper_bound = False

    def unit_variance(self, mu: np.ndarray) -> np.ndarray:
        """Get the unit-level expected variance, ``v(mu) = 1 + mu**2``.

        See superclass documentation.

        Parameters
        ----------
        mu : array-like or float

        Returns
        -------
        array-like
        """
        return 1 + mu**2

    def unit_variance_derivative(self, mu: np.ndarray) -> np.ndarray:
        """Get the derivative of the unit variance, ``v'(mu) = 2 * mu``.

        See superclass documentation.

        Parameters
        ----------
        mu : array-like or float

        Returns
        -------
        array-like
        """
        return 2 * mu

    def unit_deviance(self, y: np.ndarray, mu: np.ndarray) -> np.ndarray:
        """Get the unit-level deviance.

        See superclass documentation.

        Parameters
        ----------
        y : array-like
        mu : array-like

        Returns
        -------
        array-like
        """
        return 2 * y * (np.arctan(y) - np.arctan(mu)) + np.log(
            (1 + mu**2) / (1 + y**2)
        )
class BinomialDistribution(ExponentialDispersionModel):
    """A class for the Binomial distribution.

    The Binomial distribution is for targets ``y`` in ``[0, 1]``.
    """

    lower_bound = 0
    upper_bound = 1
    include_lower_bound = True
    include_upper_bound = True

    def __init__(self):
        # The Binomial distribution has no parameters of its own.
        return

    def unit_variance(self, mu: np.ndarray) -> np.ndarray:
        """Get the unit-level expected variance, ``v(mu) = mu * (1 - mu)``.

        See superclass documentation.

        Parameters
        ----------
        mu : array-like

        Returns
        -------
        array-like
        """
        return mu * (1 - mu)

    def unit_variance_derivative(self, mu):
        """Get the derivative of the unit variance, ``v'(mu) = 1 - 2 * mu``.

        See superclass documentation.

        Parameters
        ----------
        mu : array-like or float

        Returns
        -------
        array-like
        """
        return 1 - 2 * mu

    def unit_deviance(self, y: np.ndarray, mu: np.ndarray) -> np.ndarray:
        """Get the unit-level deviance.

        See superclass documentation.

        Parameters
        ----------
        y : array-like
        mu : array-like

        Returns
        -------
        array-like
        """
        # see Wooldridge and Papke (1996) for the fractional case
        return -2 * (special.xlogy(y, mu) + special.xlogy(1 - y, 1 - mu))

    def _rowwise_gradient_hessian(
        self, link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
    ):
        # Specialized fast path for the canonical logit link; otherwise fall
        # back to the generic superclass implementation.
        if isinstance(link, LogitLink):
            return binomial_logit_rowwise_gradient_hessian(
                y, sample_weight, eta, mu, gradient_rows, hessian_rows
            )
        return super()._rowwise_gradient_hessian(
            link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
        )

    def _eta_mu_deviance(
        self,
        link: Link,
        factor: float,
        cur_eta: np.ndarray,
        X_dot_d: np.ndarray,
        y: np.ndarray,
        sample_weight: np.ndarray,
        eta_out: np.ndarray,
        mu_out: np.ndarray,
    ):
        # Specialized fast path for the canonical logit link; otherwise fall
        # back to the generic superclass implementation.
        if isinstance(link, LogitLink):
            return binomial_logit_eta_mu_deviance(
                cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out, factor
            )
        return super()._eta_mu_deviance(
            link, factor, cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out
        )

    def log_likelihood(self, y, mu, sample_weight=None, dispersion=1) -> float:
        """Compute the log likelihood.

        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,), optional (default=1)
            Sample weights.
        dispersion : float, optional (default=1)
            Ignored.
        """
        ll = special.xlogy(y, mu) + special.xlogy(1 - y, 1 - mu)
        return np.sum(ll) if sample_weight is None else np.dot(ll, sample_weight)

    def dispersion(self, y, mu, sample_weight=None, ddof=1, method="pearson") -> float:
        r"""Estimate the dispersion parameter :math:`\phi`.

        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights or exposure to which variance is inversely proportional.
        ddof : int, optional (default=1)
            Degrees of freedom consumed by the model for ``mu``.
        method = {'pearson', 'deviance'}, optional (default='pearson')
            Whether to base the estimate on the Pearson residuals or the deviance.

        Returns
        -------
        float
        """
        y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
        if method == "pearson":
            # numexpr evaluates the Pearson residuals in a single pass.
            formula = "((y - mu) ** 2) / (mu * (1 - mu))"
            if sample_weight is None:
                return numexpr.evaluate(formula).sum() / (len(y) - ddof)
            else:
                formula = f"sample_weight * {formula}"
                return numexpr.evaluate(formula).sum() / (sample_weight.sum() - ddof)
        return super().dispersion(
            y, mu, sample_weight=sample_weight, ddof=ddof, method=method
        )
def guess_intercept(
    y: np.ndarray,
    sample_weight: np.ndarray,
    link: Link,
    distribution: ExponentialDispersionModel,
    eta: Union[np.ndarray, float] = None,
):
    """
    Say we want to find the scalar `b` that minimizes ``LL(eta + b)``, with \
    ``eta`` fixed.

    An exact solution exists for Tweedie distributions with a log link and for
    the normal distribution with identity link. An exact solution also exists
    for the case of logit with no offset.

    If the distribution and corresponding link are something else, we use the
    Tweedie or normal solution, depending on the link function.
    """
    avg_y = np.average(y, weights=sample_weight)
    if isinstance(link, IdentityLink):
        # This is only correct for normal. For other distributions, answer is unknown,
        # but assume that we want sum(y) = sum(mu)
        if eta is None:
            return avg_y
        avg_eta = eta if np.isscalar(eta) else np.average(eta, weights=sample_weight)
        return avg_y - avg_eta
    elif isinstance(link, LogLink):
        # This is only correct for Tweedie
        log_avg_y = np.log(avg_y)
        # A non-finite value here means avg_y <= 0, for which no log-link
        # intercept exists.
        assert np.isfinite(log_avg_y).all()
        if eta is None:
            return log_avg_y
        mu = np.exp(eta)
        if isinstance(distribution, TweedieDistribution):
            p = distribution.power
        else:
            p = 1  # Like Poisson
        if np.isscalar(mu):
            # Scalar offset: the constant mu ** (1 - p) factors out of the sums.
            first = np.log(y.dot(sample_weight) * mu ** (1 - p))
            second = np.log(sample_weight.sum() * mu ** (2 - p))
        else:
            first = np.log((y * mu ** (1 - p)).dot(sample_weight))
            second = np.log((mu ** (2 - p)).dot(sample_weight))
        return first - second
    elif isinstance(link, LogitLink):
        # Weighted log-odds of the average response.
        log_odds = np.log(avg_y) - np.log(np.average(1 - y, weights=sample_weight))
        if eta is None:
            return log_odds
        avg_eta = eta if np.isscalar(eta) else np.average(eta, weights=sample_weight)
        return log_odds - avg_eta
    else:
        # Fallback for other links. NOTE(review): this uses the weighted *sum*
        # of y rather than the weighted mean -- confirm this is intentional.
        return link.link(y.dot(sample_weight))
def get_one_over_variance(
    distribution: ExponentialDispersionModel,
    link: Link,
    mu: np.ndarray,
    eta: np.ndarray,
    dispersion,
    sample_weight: np.ndarray,
):
    """
    Get one over the variance.

    For Tweedie: ``sigma_inv = sample_weight / (mu ** p)`` during optimization,
    because ``phi = 1``.

    For Binomial with Logit link: Simplifies to
    ``variance = phi / ( sample_weight * (exp(eta) + 2 + exp(-eta)))``.
    More numerically accurate.
    """
    if isinstance(distribution, BinomialDistribution) and isinstance(link, LogitLink):
        # Clip eta so that np.exp cannot overflow the dtype's max value.
        max_float_for_exp = np.log(np.finfo(eta.dtype).max / 10)
        if np.any(np.abs(eta) > max_float_for_exp):
            eta = np.clip(eta, -max_float_for_exp, max_float_for_exp)  # type: ignore
        return sample_weight * (np.exp(eta) + 2 + np.exp(-eta)) / dispersion
    # Generic case: delegate to the distribution's variance function.
    return 1.0 / distribution.variance(
        mu, dispersion=dispersion, sample_weight=sample_weight
    )
def _as_float_arrays(*args):
"""Convert to a float array, passing ``None`` through, and broadcast."""
never_broadcast = {} # type: ignore
maybe_broadcast = {}
always_broadcast = {}
for ix, arg in enumerate(args):
if isinstance(arg, (int, float)):
maybe_broadcast[ix] = np.array([arg], dtype="float")
elif arg is None:
never_broadcast[ix] = None
else:
always_broadcast[ix] = np.asanyarray(arg, dtype="float")
if always_broadcast and maybe_broadcast:
to_broadcast = {**always_broadcast, **maybe_broadcast}
_broadcast = np.broadcast_arrays(*to_broadcast.values())
broadcast = dict(zip(to_broadcast.keys(), _broadcast))
elif always_broadcast:
_broadcast = np.broadcast_arrays(*always_broadcast.values())
broadcast = dict(zip(always_broadcast.keys(), _broadcast))
else:
broadcast = maybe_broadcast # possibly `{}`
out = {**never_broadcast, **broadcast}
return [out[ix] for ix in range(len(args))]
| StarcoderdataPython |
1684019 | <filename>src/compas_wood/datastructures/assembly.py
from typing import NewType
from compas.datastructures import Network
class Assembly(Network):
    # Placeholder: a timber assembly modelled on top of a compas ``Network``.
    # No behaviour beyond the base class has been added yet.
    pass
| StarcoderdataPython |
73391 | <gh_stars>0
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from users.views import signupuser, moncompte, loginuser, logoutuser, myproducts, myproducts_delete
class UsersTestUrls(SimpleTestCase):
    """Check that each users-app URL name resolves to the expected view."""

    def _assert_resolves(self, url_name, expected_view, args=None):
        # Shared helper: reverse the URL name to a path, then confirm the URL
        # dispatcher maps that path back to ``expected_view``.
        url = reverse(url_name, args=args)
        self.assertEqual(resolve(url).func, expected_view)

    def test_signup_url_is_resolved(self):
        self._assert_resolves('signupuser', signupuser)

    def test_moncompte_url_is_resolved(self):
        self._assert_resolves('moncompte', moncompte)

    def test_login_url_is_resolved(self):
        self._assert_resolves('loginuser', loginuser)

    def test_logout_url_is_resolved(self):
        self._assert_resolves('logoutuser', logoutuser)

    def test_myproducts_url_is_resolved(self):
        self._assert_resolves('myproducts', myproducts)

    def test_myproducts_delete_url_is_resolved(self):
        self._assert_resolves('myproducts_delete', myproducts_delete, args=['1'])
| StarcoderdataPython |
3275927 | import hashlib
from typing import Dict, List, Optional, Tuple
import uuid
import boto3
from botocore.exceptions import ClientError
from meadowrun.aws_integration.aws_core import _get_default_region_name
BUCKET_PREFIX = "meadowrun"
def ensure_bucket(
    region_name: str,
    expire_days: int = 14,
) -> str:
    """Create a meadowrun S3 bucket in the specified region if none exists yet.

    The bucket is created with a lifecycle policy that expires objects after
    ``expire_days`` days.

    Since bucket names must be globally unique across all accounts and
    regions, the name is ``meadowrun-{region}-{uuid}`` and is returned. If a
    bucket whose name starts with ``meadowrun-{region}`` already exists, that
    one is reused instead.

    :param region_name: String region to create the bucket in, e.g.
        'us-west-2'
    :param expire_days: int number of days after which keys are deleted by
        the lifecycle policy.
    :return: the full bucket name
    """
    s3 = boto3.client("s3", region_name=region_name)
    # S3 bucket names must be globally unique across all accounts and regions,
    # so look for an existing bucket with our region-specific prefix first.
    prefix = f"{BUCKET_PREFIX}-{region_name}"
    response = s3.list_buckets()
    for existing_bucket in response["Buckets"]:
        if existing_bucket["Name"].startswith(prefix):
            return existing_bucket["Name"]
    location = {"LocationConstraint": region_name}
    bucket_name = f"{prefix}-{str(uuid.uuid4())}"
    s3.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=location)
    s3.put_bucket_lifecycle_configuration(
        Bucket=bucket_name,
        LifecycleConfiguration=dict(
            Rules=[
                dict(
                    Expiration=dict(
                        Days=expire_days,
                    ),
                    ID="meadowrun-lifecycle-policy",
                    # Filter is mandatory, but we don't want one:
                    Filter=dict(Prefix=""),
                    Status="Enabled",
                )
            ]
        ),
    )
    return bucket_name
async def ensure_uploaded(
    file_path: str, region_name: Optional[str] = None
) -> Tuple[str, str]:
    """Upload ``file_path`` to the meadowrun S3 bucket if not already present.

    The object key is the BLAKE2b hex digest of the file contents, so
    identical files are stored (and uploaded) only once.

    :param file_path: local path of the file to upload
    :param region_name: AWS region; the configured default is used when None
    :return: (bucket_name, object_key)
    """
    if region_name is None:
        region_name = await _get_default_region_name()
    s3 = boto3.client("s3", region_name=region_name)
    hasher = hashlib.blake2b()
    with open(file_path, "rb") as file:
        buf = file.read()
        hasher.update(buf)
    digest = hasher.hexdigest()
    bucket_name = ensure_bucket(region_name)
    try:
        # If the object already exists there is nothing to upload.
        s3.head_object(Bucket=bucket_name, Key=digest)
        return bucket_name, digest
    except ClientError as error:
        # 404 just means "not found"; anything else is a real failure.
        if not error.response["Error"]["Code"] == "404":
            raise error
    # doesn't exist, need to upload it
    s3.upload_file(Filename=file_path, Bucket=bucket_name, Key=digest)
    return bucket_name, digest
async def download_file(
    bucket_name: str,
    object_name: str,
    file_name: str,
    region_name: Optional[str] = None,
) -> None:
    """Download S3 object ``object_name`` from ``bucket_name`` to ``file_name``.

    Uses the configured default region when ``region_name`` is None.
    """
    if region_name is None:
        region_name = await _get_default_region_name()
    s3 = boto3.client("s3", region_name=region_name)
    s3.download_file(bucket_name, object_name, file_name)
def delete_all_buckets(region_name: str) -> None:
    """Delete the meadowrun bucket (and all of its objects) in ``region_name``.

    ``ensure_bucket`` creates at most one bucket per region with the
    ``meadowrun-{region}`` prefix, so only the first match is deleted.
    Does nothing if no such bucket exists.
    """
    s3 = boto3.client("s3", region_name=region_name)
    prefix = f"{BUCKET_PREFIX}-{region_name}"
    response = s3.list_buckets()
    for existing_bucket in response["Buckets"]:
        if existing_bucket["Name"].startswith(prefix):
            bucket_name = existing_bucket["Name"]
            break
    else:
        # No meadowrun bucket in this region -- nothing to do.
        return
    # easier to work with resource now
    s3 = boto3.resource("s3", region_name=region_name)
    bucket = s3.Bucket(bucket_name)
    # S3 doesn't allow deleting a bucket with anything in it, so delete all
    # objects in chunks of up to 1000, which is the maximum allowed per call.
    key_chunk: List[Dict[str, str]] = []
    for object in bucket.objects.all():
        if len(key_chunk) == 1000:
            bucket.delete_objects(Delete=dict(Objects=key_chunk))
            key_chunk.clear()
        key_chunk.append(dict(Key=object.key))
    if key_chunk:
        # Guard against an empty bucket: delete_objects rejects an empty
        # Objects list with a MalformedXML error.
        bucket.delete_objects(Delete=dict(Objects=key_chunk))
    bucket.delete()
| StarcoderdataPython |
1769632 | """View takes in a csv from client with full name, company and domain
This will put their full name and domain into ten different variations to
see if an email exists. Slow but mimicks human behavior. Each check utilizes
a proxy to reduce risk of limit on RealEmail API. Proxy Scraper scrapes 100 fresh
IP addresses and is rotated through out each email variation check."""
import requests
from bs4 import BeautifulSoup as bs
from random import choice
from django.views import View
from django.http import JsonResponse
from django.views.generic.detail import SingleObjectMixin
import json
from selenium import webdriver
from time import sleep
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
# CONFIGURATION for realEmail GET requests
# Retry transient failures (rate limiting and 5xx responses) up to 10 times
# for the idempotent HTTP methods listed below.
# NOTE(review): ``method_whitelist`` was renamed ``allowed_methods`` in
# urllib3 1.26 and removed in 2.0 -- pin urllib3 < 2 or rename on upgrade.
retry_strategy = Retry(
    total=10,
    status_forcelist=[429, 500, 502, 503, 504],
    method_whitelist=["HEAD", "GET", "OPTIONS"]
)
adapter = HTTPAdapter(max_retries=retry_strategy)
# Shared session so every request in this module gets the retry behaviour.
real_response = requests.Session()
real_response.mount("https://", adapter)
real_response.mount("http://", adapter)
def scrape_proxies():
    """Scrape fresh proxy addresses from sslproxies.org using Selenium.

    Selenium is used to get past the site's threat-detection landing page.
    IMPORTANT: a cookie must first be obtained by clicking "approve" in the
    webdriver, then validation re-run; workaround TODO.

    :return: list of ``"ip:port"`` strings
    """
    # path to be changed for deployment
    PATH = 'C:\Program Files (x86)/chromedriver.exe'
    options = webdriver.ChromeOptions()
    options.add_argument('--ignore-certificate-errors-spki-list')
    options.add_argument('--ignore-certificate-errors')
    options.add_argument('--ignore-ssl-errors')
    # NOTE(review): ``chrome_options`` is deprecated in Selenium 4 -- use
    # ``options=`` when upgrading.
    driver = webdriver.Chrome(PATH, chrome_options=options)
    driver.get("http://sslproxies.org/")
    soup = bs(driver.page_source, 'lxml')
    table = soup.find('tbody')
    # Each table row has 8 cells: the IP is the 1st cell, the port the 2nd.
    ips = table.select('tr > td')[::8]
    ports = table.select('tr > td')[1::8]
    driver.close()
    complete_ip = []
    for index in range(len(ips)):
        complete_ip.append(ips[index].contents[0] + ':' + ports[index].contents[0])
    print('Proxy Scraping Completed')
    return complete_ip
def scrape_proxies_requests():
    """Obtain ~100 fresh proxies from sslproxies.org via plain ``requests``.

    NOTE(review): the site may return a threat-detection landing page instead
    of the proxy table, which makes the parse below fail -- this is why the
    Selenium-based :func:`scrape_proxies` exists as an alternative.

    :return: list of proxy address strings
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0',
        'Referrer-Policy': 'strict-origin-when-cross-origin',
        'Accept': '*/*',
        'Connection': 'keep-alive'
    }
    response = requests.get("http://sslproxies.org/", headers=headers, verify=False)
    soup = bs(response.content, "html.parser")
    table = soup.find('tbody')
    # Each table row has 8 cells: the IP is the 1st cell, the port the 2nd.
    ips = table.select('tr > td')[::8]
    ports = table.select('tr > td')[1::8]
    # DEFAULT IP ADDRESSES LAST CHECKED 28-07-21
    complete_ip = ['http://192.168.3.11:8080', '172.16.58.3:8118']
    for index in range(len(ips)):
        # create a list of scraped proxies for rotation
        complete_ip.append(ips[index].contents[0] + ':' + ports[index].contents[0])
    return complete_ip
def variation_list(target):
    """Build the ten candidate email-address variations for one contact.

    ``target`` must have ``'name'`` (full name) and ``'domain'`` keys. The
    first and last whitespace-separated tokens of the name are used, so
    middle names no longer produce malformed addresses, and single-word
    names no longer raise IndexError (they fall back to ``name@domain``).

    :return: dict mapping variation number (1-10) to an email address;
        variation 10 is always ``info@domain`` as a last resort.
    """
    tokens = target['name'].lower().split()
    at_domain = '@' + target['domain']
    if len(tokens) == 1:
        # Single-word name: every name-based variation collapses to
        # name@domain (the original code crashed here on tokens[1]).
        single = tokens[0] + at_domain
        variations = {number: single for number in range(1, 10)}
        variations[10] = 'info' + at_domain
        return variations
    # Use the first and last tokens so middle names are ignored.
    first, last = tokens[0], tokens[-1]
    first_initial, last_initial = first[0], last[0]
    return {
        1: first + last + at_domain,                    # combined, no separator
        2: first + '.' + last + at_domain,              # combined, dot separator
        3: last + first + at_domain,                    # reversed
        4: last + '.' + first + at_domain,              # reversed, dot separator
        5: first_initial + last + at_domain,            # first initial + last
        6: first_initial + '.' + last + at_domain,      # first initial . last
        7: last_initial + first + at_domain,            # last initial + first
        8: last_initial + '.' + first + at_domain,      # last initial . first
        9: first + at_domain,                           # first name only
        10: 'info' + at_domain,                         # generic fallback
    }
class ValidateView(SingleObjectMixin, View):
    """Accepts Full Name, Domain, and Email Address from uploadCSV.js.

    Every email-address variation of each uploaded contact is checked against
    the Real Email API through randomly chosen proxies (to stay under the
    per-IP limit of ~100 checks per day). Results are returned as JSON.
    """

    # How many proxies to try for one email variation before giving up.
    MAX_ATTEMPTS_PER_EMAIL = 5

    def post(self, request):
        """Validate every variation of every uploaded contact's email."""
        targets = json.loads(request.body.decode('utf-8'))
        proxies = scrape_proxies()
        end_results = []
        for target in targets:
            if target['name'] == '':
                print('Completed')
                break
            for email_address in variation_list(target).values():
                status = self._check_email(email_address, proxies)
                if status == "valid":
                    print("is valid: ", email_address)
                    entry = {
                        "name": target['name'],
                        "company": target['company'],
                        "category": target['category'],
                        "email": email_address,
                        "email_confirmed": True,
                    }
                else:
                    print("is " + status + ": ", email_address)
                    entry = {
                        "name": target['name'],
                        "company": target['company'],
                        "category": target['category'],
                        "email": ''
                    }
                if entry not in end_results:
                    end_results.append(entry)
                # Mimic human pacing between variation checks.
                sleep(5)
        print(end_results)
        return JsonResponse(end_results, safe=False)

    def _check_email(self, email_address, proxies):
        """Query the Real Email API for ``email_address`` via random proxies.

        Returns the API status ('valid', 'invalid', 'unknown'), or 'unknown'
        after MAX_ATTEMPTS_PER_EMAIL failed proxy attempts. A failed proxy
        connection rotates to the next proxy instead of falling through to an
        unbound ``response`` variable (a crash in the original code).
        """
        for _ in range(self.MAX_ATTEMPTS_PER_EMAIL):
            new_proxy = choice(proxies)
            print('Checking: ' + email_address + ' with ' + new_proxy)
            try:
                response = real_response.get(
                    "https://isitarealemail.com/api/email/validate",
                    # REAL EMAIL is used so proxies can be rotated and our own
                    # IP address is never exposed to the API.
                    params={'email': email_address},
                    proxies={
                        'https': 'https://' + new_proxy
                    },
                    headers={
                        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0',
                        'Referrer-Policy': 'strict-origin-when-cross-origin',
                        'Accept': '*/*',
                    },
                    timeout=20,
                )
            except Exception:
                # Dead or blocked proxy -- try another one.
                continue
            if response.status_code == 200:
                return response.json()['status']
            # Non-200 response (e.g. rate limited): rotate to another proxy.
        return "unknown"
166354 | <filename>abyssal_modules/metrics.py
from prometheus_client import Counter
# Prometheus counter: total number of mutated modules created, broken down
# by the module's ``type`` label.
COUNTER_MODULES_CREATED = Counter(
    'mutaplasmid_modules_created',
    'Number of modules created',
    ['type']
)
1758780 | <gh_stars>0
# -*- coding: UTF-8 -*-
from main import create_app, db
app = create_app()
# DESTRUCTIVE: drops every table and recreates the schema from the current
# models. Intended as a one-off development database-reset script only.
with app.app_context():
    db.drop_all()
    db.create_all()
137962 | <reponame>vicarmar/audio2score<filename>src/audio2score/test.py
import argparse
import csv
import os
from datetime import datetime
from pathlib import Path
import torch
from tqdm import tqdm
from audio2score.data.data_loader import (AudioDataLoader, BucketingSampler,
SpectrogramDataset)
from audio2score.utils import (LabelDecoder, calculate_cer, calculate_ler, calculate_wer,
config_logger, load_model)
def main():
    """Parse command-line arguments and evaluate a trained audio2score model.

    All of the actual evaluation work is delegated to ``test(args)``.
    """
    parser = argparse.ArgumentParser(
        description='Audio2Score testing',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-m', '--model-path',
                        default='models/model_default.pth',
                        help='Path to model file created by training',
                        required=True)
    parser.add_argument('-t', '--test-manifest',
                        metavar='DIR',
                        help='path to validation manifest csv',
                        required=True,
                        default='data/test_manifest.csv')
    parser.add_argument('-bs', '--batch-size',
                        default=20,
                        type=int,
                        help='Batch size for training')
    parser.add_argument('-nw', '--num-workers',
                        default=4,
                        type=int,
                        help='Number of workers used in dataloading')
    parser.add_argument('--no-cuda',
                        action="store_true",
                        help='Do not use cuda to test model')
    parser.add_argument(
        '--silent',
        action="store_true",
        help="Do not log out decoded output and error of each sample")
    args = parser.parse_args()
    test(args)
def test(args):
    """Evaluate a trained audio2score model on a manifest; log WER/CER/LER.

    Results are appended to ``test_results.csv`` next to the checkpoint.
    """
    # Logs and the results CSV are written next to the model checkpoint.
    save_folder = os.path.dirname(args.model_path)
    if not save_folder:
        save_folder = './'
    manifest_name = '_'.join([*Path(args.test_manifest).parts[-2:]])
    test_job = f"test_{manifest_name}_{Path(args.model_path).with_suffix('.log').name}"
    log_file = f'{save_folder}/{datetime.now().strftime("%Y%m%d-%H%M%S")}_{test_job}'
    logger = config_logger('test', log_file=log_file, console_level='ERROR')
    # Inference only: disable autograd for the whole process.
    torch.set_grad_enabled(False)
    model, _ = load_model(args.model_path)
    device = torch.device("cpu" if args.no_cuda else "cuda")
    label_decoder = LabelDecoder(model.labels)
    model.eval()
    model = model.to(device)
    test_dataset = SpectrogramDataset(audio_conf=model.audio_conf,
                                      manifest_filepath=args.test_manifest,
                                      labels=model.labels)
    test_sampler = BucketingSampler(test_dataset, batch_size=args.batch_size)
    test_loader = AudioDataLoader(test_dataset,
                                  batch_sampler=test_sampler,
                                  num_workers=args.num_workers)
    # Fixed seed shuffle so runs over the same manifest are comparable.
    test_sampler.shuffle(1)
    total_wer, total_cer, total_ler, num_words, num_chars, num_labels = 0, 0, 0, 0, 0, 0
    # NOTE(review): the inner loop below reuses loop variable `i`, shadowing
    # the batch index — harmless since the batch index is never used after.
    for i, (data) in tqdm(enumerate(test_loader),
                          total=len(test_loader),
                          ascii=True):
        inputs, targets, input_sizes, target_sizes, filenames = data
        inputs = inputs.to(device)
        input_sizes = input_sizes.to(device)
        outputs = model.transcribe(inputs, input_sizes)
        for i, target in enumerate(targets):
            # Avoid decoding the target, but load original uncoded krnseq.
            # This allows to use different datasets that could be encoded with
            # a different label encoder than the current one used by the model.
            # reference = label_decoder.decode(target[:target_sizes[i]].tolist())
            krnseq_path = Path(filenames[i]).with_suffix('.krnseq')
            with open(krnseq_path, 'r') as krnseq_file:
                reference = krnseq_file.read()
            transcript = label_decoder.decode(outputs[i])
            wer, trans_words, ref_words = calculate_wer(
                transcript, reference, '\t')
            cer, trans_chars, ref_chars = calculate_cer(
                transcript, reference, '\t')
            ler, trans_labels, ref_labels = calculate_ler(
                transcript, reference)
            total_wer += wer
            num_words += ref_words
            total_cer += cer
            num_chars += ref_chars
            total_ler += ler
            num_labels += ref_labels
            if not args.silent:
                logger.info(f"File: {filenames[i]}")
                logger.info(f"WER: {float(wer) / ref_words}")
                logger.info(f"CER: {float(cer) / ref_chars}")
                logger.info(f"LER: {float(ler) / ref_labels}")
                logger.info(
                    "\n===================================== \nREFERENCE:")
                logger.info(f'\n{reference}')
                logger.info(
                    "\n===================================== \nHYPOTHESIS:")
                logger.info(f'\n{transcript}')
                logger.info("")
    # Corpus-level rates, as percentages.
    wer = 100 * float(total_wer) / num_words
    cer = 100 * float(total_cer) / num_chars
    ler = 100 * float(total_ler) / num_labels
    logger.info(
        f'Test Summary \tAverage WER {wer:.3f}\tAverage CER {cer:.3f}\tAverage LER {ler:.3f}'
    )
    model_id = Path(args.model_path).name
    results_path = f'{save_folder}/test_results.csv'
    file_exists = os.path.isfile(results_path)
    with open(results_path, 'a') as resfile:
        wr = csv.writer(resfile)
        # Write the header only on first creation of the CSV.
        if not file_exists:
            wr.writerow(['Dataset', 'Model', 'WER', 'CER', 'LER'])
        wr.writerow([
            manifest_name, model_id, f'{wer:.3f}', f'{cer:.3f}', f'{ler:.3f}'
        ])
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1630083 | # Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import zip
from copy import deepcopy
from moldesign.units import *
def grid_map(f, v, dims, grids):
    """Evaluate *f* over every grid point, varying only the listed dims.

    :param f: function to be evaluated, call signature f(v)
    :param v: vector that sets the static coordinates
    :param dims: ndims-length list of dimensions to vary
    :param grids: ndims-length list of grid values for each dimension
    :return: array of function values with the shape of ``grids[0]``
    """
    point = deepcopy(v)
    for idx, coords in enumerate(zip(*(g.flat for g in grids))):
        for dim, coord in zip(dims, coords):
            point[dim] = coord
        value = f(point)
        if idx == 0:
            # Multiplying zeros by the first value lets the output array
            # pick up the result's units/dtype.
            out = np.zeros(grids[0].shape) * value
        out.flat[idx] = value
    return out
def function_slice(f, v, dims, ranges):
    """Evaluate *f* on an arbitrary-dimensional slice through *v*.

    :param f: function to be evaluated, call signature f(v)
    :param v: vector that sets the static coordinates
    :param dims: ndims-length list of dimensions to vary
    :param ranges: ndims-list of values along those dimensions
    :return: (grids, values) — grid arrays and matching function values
    """
    assert len(dims) == len(ranges)
    if len(ranges) > 1:
        grid_arrays = np.meshgrid(*ranges)
    else:
        grid_arrays = list(ranges)
    # Carry the units of each range onto its grid array.
    grid_arrays = [units_transfer(rng, arr)
                   for rng, arr in zip(ranges, grid_arrays)]
    return grid_arrays, grid_map(f, v, dims, grid_arrays)
| StarcoderdataPython |
1665150 | import bisect
# Competitive-programming solution: reads N, Q, the array A and Q queries X
# from stdin, then answers one score per query via binary search.
N, Q = map(int, input().split())
A = list(map(int, input().split()))
X = [int(input()) for i in range(Q)]
# Work on A from largest-index end first.
A.reverse()
# cums: plain prefix sums of reversed A.
# cums_e: prefix sums counting only even-indexed elements of reversed A.
cums = [0]
cums_e = [0]
for i, a in enumerate(A):
    cums.append(cums[-1] + a)
    cums_e.append(cums_e[-1] + (0 if i % 2 else a))
# Precompute (threshold, score) pairs; for each i the score takes the first
# i elements plus the even-indexed tail beyond position 2*i.
# NOTE(review): the exact game/problem semantics are not visible here.
borders = []
scores = []
for i in range(1, N // 2 + N % 2):
    b = (A[i] + A[2 * i]) // 2 + 1
    borders.append(b)
    s = cums[i] + cums_e[-1] - cums_e[2 * i]
    scores.append(s)
if N % 2:
    scores.append(cums[N // 2 + 1])
else:
    scores.append(cums[N // 2])
# borders must be ascending for bisect; scores aligned to the same order.
borders.reverse()
scores.reverse()
ans = []
for x in X:
    i = bisect.bisect(borders, x)
    ans.append(scores[i])
print(*ans, sep='\n')
| StarcoderdataPython |
3318364 | <reponame>ZeldaZach/AdventOfCode2020
import pathlib
from typing import List
def get_input_data(file: str) -> List[str]:
    """Read *file* and return its lines, trailing newlines included."""
    path = pathlib.Path(file)
    with path.open() as handle:
        return handle.readlines()
def day3_part1(data: List[str], right: int = 3, down: int = 1):
    """Count trees ('#') hit while sledding down the slope (right, down).

    Rows are assumed to end with a newline; the grid wraps horizontally.
    """
    hits = 0
    col = 0
    for line in data[::down]:
        if line[col] == "#":
            hits += 1
        col += right
        # Wrap around the grid width (excluding the trailing newline).
        width = len(line) - 1
        if col >= width:
            col -= width
    return hits
def day3_part2(data: List[str]):
    """Product of tree counts over the five standard slopes."""
    slopes = ((1, 1), (3, 1), (5, 1), (7, 1), (1, 2))
    product = 1
    for r, d in slopes:
        product *= day3_part1(data, r, d)
    return product
# Script entry: print answers for both puzzle parts.
if __name__ == "__main__":
    print(day3_part1(get_input_data("input.txt")))
    print(day3_part2(get_input_data("input.txt")))
| StarcoderdataPython |
1607769 | <filename>kanpai/array.py
from .validator import Validator, RequiredMixin
class Array(RequiredMixin):
    """Chainable validator for list/tuple values.

    Each builder method appends a processor dict
    (``{'action': callable, 'attribs': dict}``) to ``self.processors``;
    the inherited machinery (Validator/RequiredMixin, defined in
    ``.validator``) is presumed to run them in order — confirm against
    the base classes.
    """
    def __init__(self, error="Expecting an array.", convert_none_to_empty=False):
        self.processors = []
        self.processors.append({
            'action': self.__assert_array,
            'attribs': {
                'error': error,
                'convert_none_to_empty': convert_none_to_empty
            }
        })
    def __assert_array(self, data, attribs):
        """Accept list/tuple (or None); reject everything else."""
        # None passes through (or becomes []) so required-ness can be
        # enforced separately by the mixin.
        if data is None:
            if attribs.get('convert_none_to_empty', False):
                return self.validation_success([])
            else:
                return self.validation_success(data)
        # Exact type check: subclasses of list/tuple are rejected.
        if type(data) is list or type(data) is tuple:
            return self.validation_success(data)
        else:
            return self.validation_error(data, attribs.get('error'))
    def of(self, element_validator):
        """Validate every element with *element_validator*; returns self."""
        if not isinstance(element_validator, Validator):
            raise TypeError(
                f'Expecting a instance of validator in element_validator')
        self.processors.append({
            'action': self.__validate_elements,
            'attribs': {
                'element_validator': element_validator
            }
        })
        return self
    def __validate_elements(self, data, attribs):
        """Run the element validator on each item, collecting per-index errors."""
        if data is None:
            return self.validation_success(data)
        validation_success = True
        validation_error = {}
        validated_data = []
        element_validator = attribs.get('element_validator')
        for index, element in enumerate(data):
            validation_result = element_validator.validate(element)
            validation_success = validation_success and validation_result.get(
                'success')
            validated_data.append(validation_result.get('data'))
            # Errors are keyed by element index.
            if validation_result.get('success') is False:
                validation_error[index] = validation_result.get('error')
        return {
            'success': validation_success,
            'data': validated_data,
            'error': validation_error
        }
    def min(self, min_length, error=None):
        """Require at least *min_length* elements; returns self."""
        if type(min_length) is not int:
            raise ValueError(
                'value for min_length is expected to be an integer')
        if error is None:
            error = f"At least {min_length} element required."
        self.processors.append({
            'action': self.__assert_min,
            'attribs': {
                'min_length': min_length,
                'error': error
            }
        })
        return self
    def __assert_min(self, data, attribs):
        # None is not length-checked; only concrete arrays are.
        if data is not None and len(data) < attribs['min_length']:
            return self.validation_error(data, attribs['error'])
        else:
            return self.validation_success(data)
    def max(self, max_length, error=None):
        """Allow at most *max_length* elements; returns self."""
        if type(max_length) is not int:
            raise ValueError(
                'value for max_length is expected to be an integer')
        if error is None:
            error = f"Maximum {max_length} element allowed."
        self.processors.append({
            'action': self.__assert_max,
            'attribs': {
                'max_length': max_length,
                'error': error
            }
        })
        return self
    def __assert_max(self, data, attribs):
        if data is not None and len(data) > attribs['max_length']:
            return self.validation_error(data, attribs['error'])
        else:
            return self.validation_success(data)
| StarcoderdataPython |
174983 | """The flake8 command execution script. This is used by the deploying
job mainly.
Command example:
$ python ./scripts/run_flake8.py
"""
import sys
from logging import Logger
sys.path.append('./')
import scripts.command_util as command_util
from apysc._console import loggers
from scripts.apply_lints_and_build_docs import FLAKE8_COMMAND
logger: Logger = loggers.get_info_logger()
def _main() -> None:
    """Execute flake8 and fail loudly on any reported problem.

    Raises
    ------
    Exception
        If the command printed anything to standard out.
    """
    logger.info('flake8 command started.')
    output: str = command_util.run_command(command=FLAKE8_COMMAND)
    if output:
        raise Exception('There are flake8 errors or warning.')
# Script entry point.
if __name__ == '__main__':
    _main()
| StarcoderdataPython |
3330405 | <reponame>godzilla-but-nicer/boolmininfo<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import seaborn as sns
from itertools import chain, combinations
from copy import copy, deepcopy
# modified from itertools documentation
def powerset(iterable):
    """All non-empty subsets of *iterable*, smallest first.

    powerset([1,2,3]) --> (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
    """
    items = list(iterable)
    return chain.from_iterable(
        combinations(items, size) for size in range(1, len(items) + 1))
def exclude_subsets(iter_of_sets):
    """Return the input collection minus every set that is contained in a
    different element of the collection."""
    kept = []
    for candidate in iter_of_sets:
        dominated = any(
            candidate != other and set(candidate).issubset(other)
            for other in iter_of_sets)
        if not dominated:
            kept.append(candidate)
    return kept
def contains_subsets(iter_of_sets):
    """True if some element of the collection contains a different element
    as a subset."""
    return any(
        outer != inner and set(inner).issubset(outer)
        for outer in iter_of_sets
        for inner in iter_of_sets)
def PID_sets(k):
    """Return the antichains (lattice atoms) of the redundancy lattice
    over *k* inputs."""
    candidates = powerset(powerset(range(0, k)))
    return [collection for collection in candidates
            if not contains_subsets(collection)]
def partial_order(alpha, beta):
    """True when *alpha* precedes *beta* on the redundancy lattice: every
    element of beta must contain some element of alpha as a subset."""
    return all(
        any(set(elem_a).issubset(elem_b) for elem_a in alpha)
        for elem_b in beta)
def redundancy_lattice(pid_sets):
    """Construct the redundancy lattice (a DiGraph) by covering relations
    of the partial ordering defined above."""
    # this gets all of the paths but it doesn't get us the edges that we
    # actually want. The problem is, for example our relationship is satisfied
    # for beta = {1, 2, 3} and alpha = {{1}, {2}, {3}}. What we want are
    # actually the longest paths through our graph. thats why negative weight
    #
    # this approach is more or less copied from ryan james lattice package
    # https://github.com/dit/lattices/blob/master/lattices/lattice.py
    D_temp = nx.DiGraph()
    # Add an edge for every ordered pair; weight -1 turns "longest path"
    # into "shortest path" for Bellman-Ford below.
    for atom_a, atom_b in combinations(pid_sets, 2):
        if partial_order(atom_a, atom_b):
            D_temp.add_edge(atom_a, atom_b, weight=-1)
        elif partial_order(atom_b, atom_a):
            D_temp.add_edge(atom_b, atom_a, weight=-1)
    # We want the edges that make up the longest paths
    lengths = nx.algorithms.all_pairs_bellman_ford_path_length(D_temp)
    # new graph with only the edges we care about
    lattice = nx.DiGraph()
    lattice.add_nodes_from(D_temp.nodes())
    # now we can select the paths we want: distance -1 pairs are covering
    # relations (immediate neighbours on the lattice).
    for p, path in lengths:
        for w, weight in path.items():
            if weight == -1:
                lattice.add_edge(p, w)
    return lattice
def pretty_labels_map(atom_labels):
    """Map each lattice atom tuple to the '{..}{..}' notation used in the
    Williams & Beer I_min PID papers."""
    renamed = {}
    for atom in atom_labels:
        text = str(atom).replace(',', '').replace(' ', '')
        text = text.replace('(', '{').replace(')', '}')
        # Strip the outermost braces that came from the enclosing tuple.
        renamed[atom] = text[1:-1]
    return renamed
def get_y_positions(n_inputs, vertical_height, pad):
    """Vertical coordinates for lattice nodes laid out along a horizontal
    axis; default height is a third of the available space."""
    atom_counts = {2: 4, 3: 18, 4: 166}
    quad = np.linspace(pad, vertical_height - pad, 4)
    trio = quad[:3]
    positions = np.full(atom_counts[n_inputs], vertical_height / 3)
    # NOTE: the slice layout below is hard-coded for the 3-input lattice.
    for start, stop, vals in ((1, 4, trio), (4, 7, trio), (7, 11, quad),
                              (11, 14, trio), (14, 17, trio)):
        positions[start:stop] = vals
    return positions
def get_node_color_sequence(palette, node):
    """One color per lattice node, grouped by redundancy->synergy level.

    Hard-coded for the 18-node 3-input layout; *node* is accepted for
    interface compatibility but unused.
    """
    level_sizes = (1, 3, 3, 4, 3, 3, 1)
    colors = []
    for level, size in enumerate(level_sizes):
        colors.extend([palette[level]] * size)
    return colors
def pid_plot(pid_series, n_inputs=3):
    """
    Takes a set of values from a partial information decomposition and
    returns a plot to aid in quick comparisons between decompositions.
    """
    # calculate lattice
    # NOTE(review): PID_sets(3) is hard-coded even though n_inputs is a
    # parameter — layouts other than 3 inputs will be inconsistent.
    D = redundancy_lattice(PID_sets(3))
    # Get the values we need for sorting
    D_top = list(nx.topological_sort(D))[0]
    from_top = nx.single_source_shortest_path_length(D, source=D_top)
    D_sorted = sorted(list(D.nodes()), key=lambda x: from_top[x])
    # sort lattice, relabel
    label_map = pretty_labels_map(D.nodes())
    fancy = nx.relabel_nodes(D, label_map)
    fancy_sorted = [label_map[n] for n in D_sorted]
    # drop the rule column
    pid_series = pid_series.drop('rule', axis=1)
    # unpack keys and labels from dictionary into lists
    print(D_sorted)
    values = np.array([pid_series[str(atom)].values[0] for atom in D_sorted])
    values_norm = values / np.sum(values)
    fig, ax = plt.subplots(nrows=2, sharex=True, gridspec_kw={'height_ratios': [2.5, 1]})
    # get the dimensions of the axes for the lattice
    bbox = ax[1].get_window_extent()
    # calculate positions for nodes
    nhpad = 50
    nvpad = 20
    node_x = np.linspace(nhpad, bbox.width - nhpad, len(D.nodes()))
    node_y = get_y_positions(n_inputs, bbox.height, nvpad)
    pos_dict = {lab:(x, y) for lab, x, y in zip(fancy_sorted, node_x, node_y)}
    # use this color palette to indicate redundancy -> synergy
    pal = sns.color_palette('RdBu', 7)
    pal[3] = (.9, .9, .9)
    # hard coded node color sequence for now
    node_color = get_node_color_sequence(pal, fancy_sorted)
    # draw network
    ax[1].vlines(node_x, ymin=min(node_y), ymax=max(node_y), color=node_color,
                 linestyle='dotted', alpha=0.8)
    nx.draw_networkx(fancy, ax=ax[1], node_size=5, pos=pos_dict, font_size=10,
                     arrows=False, alpha=0.6, with_labels=False,
                     edge_color='grey', node_color='grey')
    nx.draw_networkx_labels(fancy, ax=ax[1], pos=pos_dict,
                            verticalalignment='center', font_size=8)
    ax[1].spines['top'].set_visible(False)
    ax[1].spines['bottom'].set_visible(False)
    ax[1].spines['left'].set_visible(False)
    ax[1].spines['right'].set_visible(False)
    # actual bar plot
    # we're going to shade regions on the barplot
    halfway = np.diff(node_x)[0] / 2
    first_point = node_x[0] - halfway
    mid_points = np.hstack((first_point, node_x + halfway))
    al = 0.2
    value_min = min(values_norm)
    value_max = max(values_norm)
    # fill sections from redundancy to synergy
    ax[0].fill_between(mid_points[:2], value_min, value_max,
                       color=pal[0], edgecolor=pal[0], alpha=al)
    ax[0].fill_between(mid_points[1:5], value_min, value_max,
                       color=pal[1], edgecolor=pal[1], alpha=al)
    ax[0].fill_between(mid_points[4:8], value_min, value_max,
                       color=pal[2], edgecolor=pal[2], alpha=al)
    ax[0].fill_between(mid_points[7:12], value_min, value_max,
                       color=pal[3], edgecolor=pal[3], alpha=al)
    ax[0].fill_between(mid_points[11:15], value_min, value_max,
                       color=pal[4], edgecolor=pal[4], alpha=al)
    ax[0].fill_between(mid_points[14:18], value_min, value_max,
                       color=pal[5], edgecolor=pal[5], alpha=al)
    ax[0].fill_between(mid_points[17:], value_min, value_max,
                       color=pal[6], edgecolor=pal[6], alpha=al)
    # also we're going to normalize the information values
    values_norm = np.array(values) / np.sum(values)
    ax[0].bar(node_x, values_norm, width=(node_x[2] - node_x[1]) * 0.8)
    ax[0].axhline(c='k')
    ax[0].set_xticks([])
    ax[0].set_ylabel('Proportion of Mutual Information')
    ax[0].spines['top'].set_visible(False)
    ax[0].spines['right'].set_visible(False)
    ax[0].spines['bottom'].set_visible(False)
    fig.tight_layout(h_pad=0)
    return ax
1723242 | import sys, os
from pathlib import Path # Python 3.6
from dotenv import load_dotenv
from .tools.wrapper import bcolors
env_path = Path(".") / ".pytic"
load_dotenv(dotenv_path=env_path)
def _check_file_coverage(f, lines):
    """Return the percentage of `def` lines in *lines* whose immediately
    preceding line contains an uncommented ``.register_event`` call,
    printing one PASS/FAIL line per definition.

    :param f: file path, used only for the printed report.
    :param lines: the file's lines.
    :return: rounded coverage percentage, or 0 when nothing matched.
    """
    def_count = 0
    wrapper_count = 0
    for i, line in enumerate(lines):
        if "def" in line.strip():
            def_count += 1
            # BUG FIX: i == 0 has no preceding line; previously lines[i-1]
            # wrapped around to the LAST line of the file, which could
            # wrongly count the first def as registered.
            registered = (
                i > 0
                and ".register_event" in lines[i - 1]
                and "#" not in lines[i - 1].strip()[:15]
            )
            if registered:
                print(f'{bcolors.OKGREEN}[PASS] {bcolors.ENDC}{f} line:{i} {line.strip()[:-1].replace("def ", "")}')
                wrapper_count += 1
            else:
                print(f'{bcolors.FAIL}[FAIL] {bcolors.ENDC}{f} line:{i} {line.strip()[:-1].replace("def ", "")} does not have event registration')
    if wrapper_count and def_count:
        return round((wrapper_count / def_count) * 100, 2)
    return 0
def _no_exclude(f):
no_check = os.environ.get("EXCLUDE_VENV").split(",")
for nc in no_check:
if nc in f:
return False
return True
def _check_code_coverate():
    """Walk the current tree and print per-file registration coverage.

    NOTE(review): the name keeps the original typo ("coverate") because
    the CLI entry below calls it; renaming would break that caller.
    """
    files = [os.path.join(path, name) for path, subdirs, files in os.walk(".") for name in files]
    file_cov = []
    for f in files:
        # Only check Python files that are not excluded via EXCLUDE_VENV.
        if _no_exclude(f) and f[-3:] == ".py":
            with open(f, 'r') as cov_file:
                lines = cov_file.readlines()
            fc = _check_file_coverage(f, lines)
            # Green only at exactly 100% coverage; red otherwise.
            if fc == 100.0:
                file_cov.append(f'{f} {bcolors.OKGREEN}{fc}% {bcolors.ENDC}')
            else:
                file_cov.append(f'{f} {bcolors.FAIL}{fc}% {bcolors.ENDC}')
    for fc in file_cov:
        print(fc)
# Script entry: only runs the coverage check when invoked with "coverage".
if __name__ == "__main__":
    if "coverage" in sys.argv:
        print(f'{bcolors.WARNING}Checking autopytic code coverage...{bcolors.ENDC}')
        _check_code_coverate()
| StarcoderdataPython |
131278 | <gh_stars>1-10
import collections
import random
import math
import statistics
class Utils():
    """Small logging helper namespace."""

    @staticmethod
    def log(message):
        """Print *message*; single choke point for future log routing.

        Previously defined without @staticmethod, which only worked
        because it was always called on the class itself.
        """
        print(message)
class Simulator():
    """Monte-Carlo driver: plays the three-zone quest repeatedly and
    reports aggregate turn statistics."""
    def do_quest(self, player_state):
        """Play one full quest and return (player_state, heart, neck, elbow).

        Each zone needs 4 progress: 1 progress for each NC/superlikely,
        1 for encountering 5 Dudes and 1 for either 8 dudes total or 1 Bob.
        """
        heart = FriarLoc()
        neck = FriarLoc()
        elbow = FriarLoc()
        # The three zones are mechanically identical, so drive them with
        # one loop instead of three copy-pasted while-loops.
        for zone in (heart, neck, elbow):
            while zone.progress < 4:
                zone.resolve_turn(player_state)
        return player_state, heart, neck, elbow
    def run_simulator(self, iterations=1000):
        """Run *iterations* quests and log mean/median/stdev turn counts."""
        turns = []
        superlikelies = []
        for _ in range(iterations):
            player_state, heart, neck, elbow = self.do_quest(PlayerState())
            turns.append(player_state.get_total_turns_spent())
            # BUG FIX: the original counted heart's pity NCs twice and
            # never counted elbow's; sum each zone exactly once.
            total_pity = (heart.get_superlikelies_encountered()
                          + neck.get_superlikelies_encountered()
                          + elbow.get_superlikelies_encountered())
            superlikelies.append(total_pity)
        Utils.log("In {} instances at {}% +NC, an average of {} pity NCs with {} turns between, it took an average of {} turns to complete the quest, with a median of {} and a deviation of {}."
                  .format(iterations, PlayerState().player_nc, statistics.mean(superlikelies), heart.get_pity_cooldown(), statistics.mean(turns), statistics.median(turns), statistics.pstdev(turns)))
class PlayerState():
    """Player configuration plus mutable per-run state (turns, banishes,
    copiers, inventory)."""
    def __init__(self):
        self.player_nc = 29 # Change your current +non-combat% modifier here.
        self.player_item = 200 # Change your current +item% modifier here.
        self.total_turns_spent = 0
        self.wishes = 3
        self.banishes = [ #"Banish Name", duration, max_available_uses, has_cooldown, is_free
            Banisher("Spring Bumper", 30, 999, True, True), #Add number of banishes in a variable
            Banisher("Throw Latte", 30, 4, True, True),
            Banisher("Reflex hammer", 30, 3, False, True),
            Banisher("KGB dart", 20, 3, False, True),
            Banisher("Batter Up!", 9999, 999, False, False)
        ]
        self.copiers = [ #"Copy Name", duration, max_available_uses, has_cooldown, number_of_copies, ignores_rejection
            Copier("Olfaction", 40, 999, True, 3, False),
            Copier("Share Latte", 30, 3, True, 2),
            Copier("Mating Call", 999, 1, False, 1)
        ]
        self.tracked_phylum = "Dude"
        self.olfacted_mob = None
        # NOTE(review): latted_mob/mated_mob are never read in this file —
        # possibly superseded by the Copier objects above.
        self.latted_mob = None
        self.mated_mob = None
        self.inventory = {
            "ketchup hound": 0,
            "another item": 0,
            "disposable instant camera": 1,
            "photograph of a dog": 0,
            "I Love Me, Vol. I": 0,
            "photograph of a red nugget": 0,
            "photograph of an ostrich egg": 0,
            "photograph of God": 0
        }
    def get_player_nc(self):
        return self.player_nc
    def nc_mod(self):
        # Non-combat modifier after the diminishing-returns cap.
        return mod_cap(self.player_nc)
    def item_mod(self):
        # Item-drop multiplier, e.g. +200% -> 3.0.
        return 1 + (self.player_item/100)
    def get_total_turns_spent(self):
        return self.total_turns_spent
    def incr_total_turns_spent(self):
        self.total_turns_spent += 1
    def get_wishes(self):
        return self.wishes
    def get_banishes(self):
        return self.banishes
    def get_olfacted_mob(self):
        return self.olfacted_mob
    def set_olfacted_mob(self, encounter_name):
        self.olfacted_mob = encounter_name
    def reset_olfacted_mob(self):
        self.olfacted_mob = None
    def get_extra_copies(self, encounter):
        """Extra queue copies of *encounter*: +2 for the tracked phylum,
        plus any active copier targeting this monster."""
        if encounter.get_phylum() == self.tracked_phylum:
            extra_copies = 2
        else:
            extra_copies = 0
        name = encounter.get_name()
        for copier in self.copiers:
            if copier.get_copied_mob(self) == name:
                extra_copies += copier.get_copies()
        return extra_copies
    def get_inventory_item(self, item):
        return self.inventory[item]
    def incr_inventory_item(self, item):
        self.inventory[item] += 1
    def decr_inventory_item(self, item):
        self.inventory[item] -= 1
    def check_copier(self, location, encounter):
        """Use the first idle copier on *encounter* (skip ones already
        copying it); returns True when one fired, otherwise None."""
        for copier in self.copiers:
            if (copier.get_copied_mob(self) != encounter.get_name()) and copier.check(self):
                copier.use(location, self, encounter)
                return True
    def choose_banish(self, encounter):
        """Return a usable banisher for *encounter*, False if the monster
        is already banished, or None when nothing is available."""
        avail_banish = None
        for banish in self.banishes:
            # Already banished by any source -> don't double-banish.
            if banish.get_banished_mob(self) == encounter.get_name():
                return False
            # NOTE(review): indentation was ambiguous in the source; this
            # reconstruction returns the first usable banisher immediately.
            if (not avail_banish) and banish.check(self):
                avail_banish = banish
                return avail_banish
        return avail_banish
    def check_banish(self, location, encounter):
        """Banish *encounter* if the location still has banishes budgeted
        and a banisher is available; returns True when one was used."""
        if location.get_banishes_left():
            banish = self.choose_banish(encounter)
            if banish:
                banish.use(location, self, encounter)
                return True
        return False
def mod_cap(virgin_mod):
    """Apply the diminishing-returns cap to a non-combat modifier:
    negatives clamp to 0, values above 25 become 20 + floor(value / 5)."""
    if virgin_mod < 0:
        return 0
    if virgin_mod <= 25:
        return virgin_mod
    return 20 + math.floor(virgin_mod / 5)
class Copier():
    """One monster-copying source: what it copies, for how long, and how
    many extra queue copies it adds."""
    def __init__(self, name = "", length = 30, avail_uses = 3, cooldown = False, copies = 1, rejection = True):
        self.name = name
        self.length = length            # turns the copy effect lasts
        self.avail_uses = avail_uses
        self.cooldown = cooldown        # stored but not enforced here
        self.copies = copies            # extra queue copies while active
        self.rejection = rejection      # False == olfaction-style tracking
        self.copied_mob = None
        self.expiry = -1                # -1 == never used
    def get_avail_uses(self):
        return self.avail_uses
    def get_copies(self):
        return self.copies
    def get_copied_mob(self, player_state):
        """Name of the currently copied monster, or None when expired.

        Side effect: an expired olfaction-style copier (rejection=False)
        also clears the player's olfacted-mob marker.
        """
        if self.get_expiry() < player_state.get_total_turns_spent():
            if not self.rejection:
                player_state.reset_olfacted_mob()
            return None
        return self.copied_mob
    def get_expiry(self):
        return self.expiry
    def check(self, player_state):
        # Usable when charges remain and the previous effect has expired.
        return (self.get_avail_uses()) and (self.get_expiry() < player_state.get_total_turns_spent())
    def use(self, location, player_state, encounter):
        """Start copying *encounter*: consume a use and set the expiry."""
        name = encounter.get_name()
        self.copied_mob = name
        self.avail_uses -= 1
        self.expiry = player_state.get_total_turns_spent() + self.length
        if not self.rejection:
            player_state.set_olfacted_mob(name)
class Banisher():
    """One banish source: who it banishes, for how long, and whether using
    it costs a turn."""
    def __init__(self, name = "", length = 30, avail_uses = 3, cooldown = False, free = True):
        self.name = name
        self.length = length            # turns the banish lasts
        self.avail_uses = avail_uses
        self.cooldown = cooldown        # stored but not enforced here
        self.free = free                # free banishes don't consume a turn
        self.banished_mob = None
        self.expiry = -1                # -1 == never used
    def get_avail_uses(self):
        return self.avail_uses
    def get_expiry(self):
        return self.expiry
    def get_banished_mob(self, player_state):
        """Name of the currently banished monster, or None when expired."""
        if self.get_expiry() < player_state.get_total_turns_spent():
            return None
        return self.banished_mob
    def check(self, player_state):
        # Usable when charges remain and the previous banish has expired.
        return (self.get_avail_uses()) and (self.get_expiry() < player_state.get_total_turns_spent())
    def use(self, location, player_state, encounter):
        """Banish *encounter*: consume a use, set expiry, and mark the
        turn free when this banisher is a free action."""
        self.banished_mob = encounter.get_name()
        self.avail_uses -= 1
        self.expiry = player_state.get_total_turns_spent() + self.length
        if self.free:
            location.toggle_free_turn()
        location.incr_banishes_used()
class Encounter():
    """Base class for anything the player can run into: combats,
    non-combats and superlikelies."""
    def __init__(self, name = "", phylum = None, banish = False, copy = False):
        self.name = name
        self.phylum = phylum
        self.wish = False
        self.should_banish = banish   # strategy: banish this when possible
        self.should_copy = copy       # strategy: copy this when possible
    def __str__(self):
        return "Encounter({})".format(self.name)
    def get_name(self):
        return self.name
    def get_phylum(self):
        return self.phylum
    def get_use_all_sniffs(self):
        # NOTE(review): self.use_all_sniffs is never assigned anywhere in
        # this file — calling this raises AttributeError.
        return self.use_all_sniffs
    def check(self, player_state):
        """True unless this encounter is currently banished."""
        for banish in player_state.get_banishes():
            if banish.get_banished_mob(player_state) == self.name:
                return False
        return True
    def add_nc_queue(self, location, nc = None):
        if nc is None:
            nc = self.name
        location.append_nc_history(nc)
    def add_com_queue(self, location, combat = None):
        if combat is None:
            combat = self.name
        location.append_combat_history(combat)
    def run(self, location, player_state):
        """Resolve one turn against this encounter.

        Returns True when the turn was free (e.g. a free banish fired);
        otherwise spends a turn and implicitly returns None.
        """
        if self.should_banish:
            player_state.check_banish(location, self)
        if self.should_copy:
            player_state.check_copier(location, self)
        self.add_com_queue(location)
        if location.get_free_turn():
            location.toggle_free_turn()
            return True
        location.incr_turns_spent()
        player_state.incr_total_turns_spent()
class Location():
    """A zone: owns its encounter tables, rejection-queue histories, and
    the logic for picking what the player runs into each turn."""
    def __init__(self, native_nc, superlikelies, non_combats, combats, banishes_to_commit=0, pity_cooldown=0):
        self.native_nc = native_nc
        self.superlikelies = superlikelies
        self.non_combats = non_combats
        self.combats = combats
        # 5-deep rejection queues: recent encounters are less likely.
        self.nc_history = collections.deque([], 5)
        self.combat_history = collections.deque([], 5)
        self.banishes_to_commit = banishes_to_commit
        self.pity_cooldown = pity_cooldown
        self.banishes_used = 0
        self.free_turn = False
        self.turns_spent = 0
    def get_non_combats(self):
        return self.non_combats
    def get_free_turn(self):
        return self.free_turn
    def get_banishes_used(self):
        return self.banishes_used
    def incr_banishes_used(self):
        self.banishes_used += 1
    def get_pity_cooldown(self):
        return self.pity_cooldown
    def get_turns_spent(self):
        return self.turns_spent
    def incr_turns_spent(self):
        self.turns_spent += 1
    def select_encounter(self, player_state):
        """Pick this turn's encounter: superlikely, then NC, then combat."""
        encounter = self.select_superlikely(player_state)
        if encounter is None:
            encounter = self.select_nc(player_state)
        if encounter is None:
            encounter = self.select_combat(player_state)
        return encounter
    def select_superlikely(self, player_state):
        """First superlikely whose check() fires, else None."""
        for superlikely in self.superlikelies:
            if superlikely.check(self, player_state):
                return superlikely
        return None
    def append_nc_history(self, nc):
        self.nc_history.append(nc)
    def append_combat_history(self, combat):
        self.combat_history.append(combat)
    def weighted_random(self, weights):
        """Pick a key from {item: weight} proportionally; None if all
        weights are zero/empty."""
        total = sum(weight for item, weight in weights.items())
        if not total:
            return None
        r = random.randint(1, total)
        for (item, weight) in weights.items():
            r -= weight
            if r <= 0:
                return item
    def get_nc_weights(self, player_state):
        """Weights for eligible non-combats; ones in the rejection queue
        are 4x less likely."""
        nc_weights = {}
        for encounter in [nc for nc in self.non_combats if nc.check(self, player_state)]:
            name = encounter.get_name()
            copies = 1 #+ (player_state.get_extra_copies(encounter) if name not in nc_weights.keys() else 0) #I have not sorted this yet.
            nc_weights[name] = copies if name in self.nc_history else (4 * copies)
        return nc_weights
    def select_nc(self, player_state):
        """Roll for a non-combat; None when the roll fails or there are no
        eligible NCs."""
        if not len(self.non_combats):
            return None
        actual_nc = self.native_nc + player_state.get_player_nc()
        if actual_nc == 0:
            return None
        if random.randrange(100) > actual_nc:
            return None
        encounter_name = self.weighted_random(self.get_nc_weights(player_state))
        if encounter_name:
            return [nc for nc in self.non_combats if nc.name == encounter_name][0]
        return None
    def get_combat_weights(self, player_state):
        """Weights for non-banished combats, adjusted for copies and the
        rejection queue (copied mobs ignore the queue penalty)."""
        combat_weights = {}
        for encounter in [monster for monster in self.combats if monster.check(player_state)]:
            name = encounter.get_name()
            copies = 1 + (player_state.get_extra_copies(encounter) if name not in combat_weights.keys() else 0)
            combat_weights[name] = copies if (name in self.combat_history and not player_state.get_olfacted_mob() == name) else (4 * copies)
        return combat_weights
    def select_combat(self, player_state):
        """Weighted-random combat pick."""
        encounter_name = self.weighted_random(self.get_combat_weights(player_state))
        return [combat for combat in self.combats if combat.name == encounter_name][0]
        # NOTE(review): unreachable — dead code after the return above.
        return None
    def toggle_free_turn(self):
        self.free_turn = not self.free_turn
    def resolve_turn(self, player_state):
        """Select and run one encounter (retrying selection up to 100x)."""
        encounter = None
        loops = 0
        while (encounter is None) and (loops < 100):
            encounter = self.select_encounter(player_state)
            loops += 1
        if encounter is not None:
            encounter.run(self, player_state)
        #Verbose information underneath
        #print("{}: {} at {} progress.".format(player_state.get_total_turns_spent(), encounter.get_name(), self.get_progress()))
class FriarLoc(Location):
    """Deep Fat Friars zone: a pity non-combat every `pity_cooldown`
    combats, a progress NC, and three Imp combats."""
    class PityNC(Encounter):
        """Guaranteed non-combat after enough combats without one."""
        def __init__(self, name = ""):
            self.name = name
        def check(self, location, player_state):
            #return location.get_turns_spent() == 10 or (location.get_pity_timer() > (location.get_pity_cooldown() - random.randint(1,2)))
            return location.get_pity_timer() == location.get_pity_cooldown()
        def run(self, location, player_state):
            # Count the pity, then delegate to the zone's first NC.
            location.incr_superlikelies()
            nc = location.get_non_combats()
            nc[0].run(location, player_state)
    class ProgressNC(Encounter):
        """Non-combat that advances quest progress and resets the pity timer."""
        def __init__(self, name = ""):
            self.name = name
        def check(self, location, player_state):
            return True
        def run(self, location, player_state):
            location.incr_turns_spent()
            location.incr_progress()
            # Reset to 1 (not 0): this turn counts toward the next pity window.
            location.set_pity_timer(1)
            player_state.incr_total_turns_spent()
    class Combat(Encounter):
        """Zone combat; same as Encounter.run but also ticks the pity timer."""
        def run(self, location, player_state):
            self.add_com_queue(location)
            location.incr_pity_timer()
            if self.should_banish:
                player_state.check_banish(location, self)
            if self.should_copy:
                player_state.check_copier(location, self)
            if location.get_free_turn():
                location.toggle_free_turn()
                return True
            location.incr_turns_spent()
            player_state.incr_total_turns_spent()
    def __init__(self):
        Location.__init__(
            self,
            5, #Native Non-Combat rate of location
            [ #Superlikelies go here
                FriarLoc.PityNC("Pity NC")
            ],
            [ #NCs go here
                FriarLoc.ProgressNC("Progress NC")
            ],
            [ #"Combat Name", "Phylum", should_banish, should_sniff
                FriarLoc.Combat("Imp 1", "Demon", False, False),
                FriarLoc.Combat("Imp 2", "Demon", False, False),
                FriarLoc.Combat("Imp 3", "Demon", False, False)
            ],
            0, #Number of banishes to commit to the location
            4 #Turns between each pity
        )
        self.progress = 0
        self.pity_timer = 0
        # NOTE(review): quest_items/dudes_fought are never updated in this file.
        self.quest_items = 0
        self.dudes_fought = 0
        self.superlikelies_encountered = 0
    def get_banishes_left(self):
        return self.banishes_used < self.banishes_to_commit
    def get_progress(self):
        return self.progress
    def incr_progress(self):
        self.progress += 1
    def get_pity_timer(self):
        return self.pity_timer
    def incr_pity_timer(self):
        self.pity_timer += 1
    def set_pity_timer(self, value):
        self.pity_timer = value
    def reset_pity_timer(self):
        self.pity_timer = 0
    def get_superlikelies_encountered(self):
        return self.superlikelies_encountered
    def incr_superlikelies(self):
        self.superlikelies_encountered += 1
if __name__ == "__main__":
    # Script entry point: run the full simulation (Simulator is defined
    # earlier in this file).
    Simulator().run_simulator()
| StarcoderdataPython |
116885 | # -*- coding: utf-8 -*-
"""Amazon SQS boto3 interface."""
from __future__ import absolute_import, unicode_literals
# boto3 is an optional dependency: fall back to None so importers can
# feature-detect SQS support instead of failing at import time.
try:
    import boto3
except ImportError:
    boto3 = None
| StarcoderdataPython |
148806 | from bfxhfindicators.indicator import Indicator
from bfxhfindicators.ema import EMA
from bfxhfindicators.accumulation_distribution import AccumulationDistribution
from math import isfinite
class ChaikinOsc(Indicator):
    """Chaikin Oscillator.

    Difference between a short-period and a long-period EMA of the
    Accumulation/Distribution Line (ADL) built from incoming candles.
    """

    def __init__(self, short, long, cache_size=None):
        """
        :param short: period of the fast EMA over the ADL
        :param long: period of the slow EMA over the ADL
        :param cache_size: optional cache size forwarded to sub-indicators
                           and the Indicator base class
        """
        self._shortEMA = EMA(short, cache_size)
        self._longEMA = EMA(long, cache_size)
        self._adl = AccumulationDistribution()
        super().__init__({
            'args': [short, long, cache_size],
            'id': 'chaikinosc',
            'name': 'ChaikinOsc(%f, %f)' % (short, long),
            'seed_period': max([short, long]),
            'data_type': 'candle',
            'data_key': '*',
            'cache_size': cache_size
        })

    def reset(self):
        """Reset this indicator and all of its sub-indicators."""
        super().reset()
        self._shortEMA.reset()
        self._longEMA.reset()
        self._adl.reset()

    def _step(self, candle, method):
        # Shared body of update()/add(); the two originals were identical
        # except for which sub-indicator method they invoked, so `method`
        # is that method name ('update' revises the latest value, 'add'
        # appends a new one).
        getattr(self._adl, method)(candle)
        adl = self._adl.v()
        if not isfinite(adl):
            return  # ADL not seeded yet; nothing to propagate
        getattr(self._shortEMA, method)(adl)
        getattr(self._longEMA, method)(adl)
        short_v = self._shortEMA.v()
        long_v = self._longEMA.v()
        if (isfinite(short_v) and isfinite(long_v)):
            getattr(super(), method)(short_v - long_v)
        return self.v()

    def update(self, candle):
        """Revise the most recent oscillator value from a revised candle."""
        return self._step(candle, 'update')

    def add(self, candle):
        """Append a new candle and return the new oscillator value."""
        return self._step(candle, 'add')
| StarcoderdataPython |
24962 | from django import template
register = template.Library()
@register.inclusion_tag('quiz/correct_answer.html', takes_context=True)
def correct_answer_for_all(context, question):
    """
    Render the correct answer for a question and, when the question id
    appears in the context's ``incorrect_questions`` list, flag that the
    user answered it incorrectly.
    """
    incorrect = context.get('incorrect_questions', [])
    return {
        'previous': {'answers': question.get_answers()},
        'user_was_incorrect': question.id in incorrect,
    }
@register.filter
def answer_choice_to_string(question, answer):
    """Template filter: delegate to the question's own helper to render a
    stored answer choice as display text."""
    return question.answer_choice_to_string(answer)
| StarcoderdataPython |
# repo: eskilop/TgThemer-py (1-10 GitHub stars)
from tgthemer import Color
# Expected conversions for a 32-bit ARGB colour (#AARRGGBB form).
colorgroup = {
    "base": '#FF18181F',       # input colour
    "result": '#FF30303E',     # base lightened by factor 1
    "alpha": '#8018181F',      # base with alpha scaled by -0.5
    "s_int": -15198177,        # signed 32-bit integer form of base
    "argb": (255, 24, 24, 31)  # (A, R, G, B) tuple form of base
}
# Expected conversions for a 24-bit RGB colour (#RRGGBB form, no alpha).
colorgroup24 = {
    "base": '#18181F',
    "result": '#30303E',
    "s_int": 1579039,
    "argb": (24, 24, 31)
}
class TestColor(object):
    """Behavioural tests for Color in 32-bit (#AARRGGBB) and 24-bit
    (#RRGGBB) hex forms, against the fixtures above."""

    def test_hex_prop(self):
        colour = Color(colorgroup["base"])
        assert colour.hex == colorgroup["base"]

    def test_sint_prop(self):
        colour = Color(colorgroup["base"])
        assert colour.sint == colorgroup["s_int"]

    def test_argb_prop(self):
        colour = Color(colorgroup["base"])
        assert colour.argb == colorgroup["argb"]

    def test_alpha(self):
        shaded = Color(colorgroup["base"]).alpha(-0.5)
        assert shaded.hex == "#8018181F"

    def test_lighten(self):
        lightened = Color(colorgroup["base"]).lighten(1)
        assert lightened == Color("#FF30303E")

    def test_darken(self):
        darkened = Color(colorgroup["result"]).lighten(-0.5)
        assert darkened == Color(colorgroup["base"])

    # 24-bit (alpha-less) variants of the checks above.
    def test_hex_prop24(self):
        colour = Color(colorgroup24["base"])
        assert colour.hex == colorgroup24["base"]

    def test_sint_prop24(self):
        colour = Color(colorgroup24["base"])
        assert colour.sint == colorgroup24["s_int"]

    def test_argb_prop24(self):
        colour = Color(colorgroup24["base"])
        assert colour.argb == colorgroup24["argb"]

    def test_lighten24(self):
        lightened = Color(colorgroup24["base"]).lighten(1)
        assert lightened == Color(colorgroup24["result"])

    def test_darken24(self):
        darkened = Color(colorgroup24["result"]).lighten(-0.5)
        assert darkened == Color(colorgroup24["base"])
# file: example/convertCaffe/layers3.py
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow.keras.layers import Layer as KLayer
import numpy as np
import time
import helper
import pickle
# Maps emitted Caffe layer names -> their numpy weights; filled in as the
# layers below run and dumped to disk by save_params().
params_dict = {}
def init_caffe_input(x):
    """Emit a Caffe "Input" layer for x = (tensor, name_fn), lazily creating
    the module-wide prototxt buffer (caffe_string) and layer counter.

    Caffe blobs are NCHW, hence the dim reordering below from the NHWC
    tensor shape (N, H, W, C) -> (N, C, H, W).
    """
    global caffe_string, layer_counter
    if not 'caffe_string' in globals():
        caffe_string = ''
    if not 'layer_counter' in globals():
        layer_counter = 0
    caffe_string += 'layer{\n'
    caffe_string += ' name: "%s"\n'%x[1]()
    caffe_string += ' type: "Input"\n'
    caffe_string += ' top: "%s"\n'%x[1]()
    caffe_string += ' input_param{\n  shape{\n   dim:%d\n   dim:%d\n   dim:%d\n   dim:%d\n  }\n }\n}\n'%(x[0].shape[0], x[0].shape[3], x[0].shape[1], x[0].shape[2])
    layer_counter += 1
# def pad_correction(x, conv_layer):
# # TF padding is shifted by 1 compared to caffe
# # we achieve this by creating a dummy layer
# global caffe_string, layer_counter
# if not 'caffe_string' in globals():
# caffe_string = ''
# if not 'layer_counter' in globals():
# layer_counter = 0
# layer_name = 'dummy%d'%layer_counter
# caffe_string += 'layer{\n'
# caffe_string += ' name: "%s"\n'%layer_name
# caffe_string += ' type: "Input"\n'
# caffe_string += ' top: "%s"\n'%layer_name
# caffe_string += ' input_param{\n shape{\n dim:%d\n dim:%d\n dim:%d\n dim:%d\n }\n }\n}\n'%(x.shape[0], x.shape[3], x.shape[1], x.shape[2])
# layer_name0 = layer_name
# layer_name = 'crop%d'%layer_counter
# caffe_string += 'layer{\n'
# caffe_string += ' name: "%s"\n'%layer_name
# caffe_string += ' type: "Crop"\n'
# caffe_string += ' bottom: "%s"\n'%conv_layer
# caffe_string += ' bottom: "%s"\n'%layer_name0
# caffe_string += ' top: "%s"\n'%layer_name
# caffe_string += ' crop_param{\n offset:%d\n offset:%d\n }\n}\n'%(1,1)
# return layer_name
def pad_correction(x, conv_layer):
    """Append a 2x2 depthwise conv layer ('padshift<N>') after `conv_layer`
    to compensate for the one-pixel offset between TF 'SAME' padding and
    Caffe padding on strided convs; returns the new layer's name.
    """
    # TF padding is shifted by 1 compared to caffe
    # We dont have dummy data and cropping layers.
    # We achieve this by incorporating a 2x2 depthwise conv
    def get_kernel(outchn):
        # Per-channel 2x2 kernel with a single 1 at position (1,1): each
        # output pixel is the bottom-right pixel of its 2x2 window, i.e. a
        # one-pixel shift of the feature map.
        res = np.zeros([2,2, outchn, 1]).astype(np.float32)
        for i in range(outchn):
            res[1,1,i] = 1
        return res
    global caffe_string, layer_counter
    if not 'caffe_string' in globals():
        caffe_string = ''
    if not 'layer_counter' in globals():
        layer_counter = 0
    layer_name = 'padshift%d'%layer_counter
    outchn = x.shape[-1]
    # Depthwise conv in Caffe = grouped Convolution with group == channels.
    caffe_string += 'layer{\n'
    caffe_string += ' name: "%s"\n'%layer_name
    caffe_string += ' type: "Convolution"\n'
    caffe_string += ' bottom: "%s"\n'%conv_layer
    caffe_string += ' top: "%s"\n'%layer_name
    caffe_string += ' convolution_param{\n'
    caffe_string += '  num_output: %d\n'%outchn
    caffe_string += '  bias_term: %s\n'%('false')
    caffe_string += '  group: %d\n'%outchn
    caffe_string += '  stride: 1\n'
    caffe_string += '  pad_h: 0\n'
    caffe_string += '  pad_w: 0\n'
    caffe_string += '  kernel_h: 2\n'
    caffe_string += '  kernel_w: 2\n'
    caffe_string += ' }\n}\n'
    params_dict[layer_name] = {}
    params_dict[layer_name]['dwkernel'] = get_kernel(outchn)
    # NOTE: layer_counter is not incremented here; the caller's own
    # increment covers both the conv and its padshift companion.
    return layer_name
def save_params(name):
    """Serialize the collected layer parameters (params_dict) to `name`.

    Uses a context manager so the file handle is closed (and the pickle
    flushed) even if dumping raises; the original opened the file without
    ever closing it.
    """
    with open(name, 'wb') as f:
        pickle.dump(params_dict, f)
# dumb layer declaration
class Layer(KLayer):
    """
    Layer template: subclasses customise behaviour by overriding
    initialize / build / forward instead of __init__ / call.
    """
    def __init__(self, *args, **kwargs):
        """
        Default constructor; forwards every argument to initialize().
        """
        super().__init__()
        self.initialize(*args, **kwargs)
    def initialize(self, *args, **kwargs):
        """
        Hook for assigning pre-defined parameters to the instance.
        """
        pass
    def build(self, input_shape):
        pass
    def call(self, x, *args, **kwargs):
        # Keras entry point; delegates to the subclass's forward().
        return self.forward(x, *args, **kwargs)
    def forward(self, x, *args, **kwargs):
        """
        Hook implementing the layer computation (alternative to call()).

        :param x: Input tensor or numpy array; numpy input is converted to
            a tensor automatically. Arrays inside args/kwargs are not.
        """
        pass
class conv2D(KLayer):
    """
    Basic convolution 2D layer.

    Operates on (tensor, name_fn) pairs and mirrors itself into the global
    Caffe prototxt buffer via _write_caffe.
    """
    def __init__(self, size, outchn, stride=1,pad='SAME',dilation_rate=1,usebias=True,values=None):
        """
        :type size: int or list[int]
        :param size: Indicate the size of convolution kernel.
        :type outchn: int
        :param outchn: Number of output channels
        :type stride: int or list[int]
        :param stride: Stride number. Can be either integer or list of integers
        :type pad: String
        :param pad: Padding method, must be one of 'SAME', 'VALID', 'SAME_LEFT'. 'VALID' does not use auto-padding scheme. 'SAME' uses tensorflow-style auto-padding and 'SAME_LEFT' uses pytorch-style auto-padding.
        :type dilation_rate: int or list[int]
        :param dilation_rate: Dilation rate. Can be either integer or list of integers. When dilation_rate is larger than 1, stride should be 1.
        :type usebias: bool
        :param usebias: Whether to add bias term in this layer.
        :type values: list[np.array]
        :param values: If the param 'values' is set, the layer will be initialized with the list of numpy array.
        """
        super(conv2D, self).__init__()
        self.size = size
        self.outchn = outchn
        self.stride = stride
        self.usebias = usebias
        self.values = values
        self.dilation_rate = dilation_rate
        assert (pad in ['SAME','VALID','SAME_LEFT'])
        self.pad = pad
    def _parse_args(self, input_shape):
        # input_shape[0] is the tensor's shape; element [1] belongs to the
        # accompanying helper.LayerName tag.
        inchannel = input_shape[0][-1]
        # parse args: normalise to TF's HWIO kernel shape / NHWC stride.
        if isinstance(self.size,list):
            self.size = [self.size[0],self.size[1],inchannel,self.outchn]
        else:
            self.size = [self.size, self.size, inchannel, self.outchn]
        # set stride
        if isinstance(self.stride,list):
            self.stride = [1,self.stride[0],self.stride[1],1]
        else:
            self.stride = [1,self.stride, self.stride, 1]
        # set dilation
        if isinstance(self.dilation_rate,list):
            self.dilation_rate = [1,self.dilation_rate[0],self.dilation_rate[1],1]
        else:
            self.dilation_rate = [1,self.dilation_rate,self.dilation_rate,1]
    def build(self, input_shape):
        # Create kernel/bias variables, from `values` if provided.
        values = self.values
        self._parse_args(input_shape)
        if self.values is not None:
            self.kernel = self.add_variable('kernel', shape=self.size, initializer=tf.initializers.constant(values[0]))
        else:
            self.kernel = self.add_variable('kernel', shape=self.size, initializer=tf.initializers.VarianceScaling(scale=2.0, mode='fan_out', distribution='untruncated_normal'))
        if self.usebias:
            if self.values is not None:
                self.bias = self.add_variable('bias', shape=[self.outchn], initializer=tf.initializers.constant(values[1]))
            else:
                self.bias = self.add_variable('bias', shape=[self.outchn], initializer=tf.initializers.constant(0.0))
        if self.pad == 'SAME_LEFT':
            self.pad_value = [self.size[0]//2, self.size[1]//2]
    def _write_caffe(self, name, out):
        """Append a Caffe Convolution layer for this op and stash its
        weights in params_dict; returns the emitted layer's name."""
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        layer_name = 'conv%d'%layer_counter
        stride = self.stride[1]
        # For strided convs caffe needs one extra pad plus a padshift layer
        # (see pad_correction) to reproduce TF 'SAME' output alignment.
        if stride==1:
            pad = self.size[0]//2
        else:
            pad = self.size[0]//2 + 1
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "Convolution"\n'
        caffe_string += ' bottom: "%s"\n'%name()
        caffe_string += ' top: "%s"\n'%layer_name
        caffe_string += ' convolution_param{\n'
        caffe_string += '  num_output: %d\n'%self.outchn
        caffe_string += '  bias_term: %s\n'%('true' if self.usebias else 'false')
        caffe_string += '  group: 1\n'
        caffe_string += '  stride: %d\n'%stride
        caffe_string += '  pad_h: %d\n'%pad
        caffe_string += '  pad_w: %d\n'%pad
        caffe_string += '  kernel_h: %d\n'%(self.size[0])
        caffe_string += '  kernel_w: %d\n'%(self.size[1])
        caffe_string += ' }\n}\n'
        params_dict[layer_name] = {}
        # Kernel is stored in TF HWIO layout; presumably transposed to
        # caffe OIHW at load time -- TODO confirm in the loader.
        params_dict[layer_name]['kernel'] = self.kernel.numpy()
        if self.usebias:
            params_dict[layer_name]['bias'] = self.bias.numpy()
        if stride>1:
            layer_name = pad_correction(out, layer_name)
        layer_counter += 1
        return helper.LayerName(layer_name)
    def call(self, x):
        """
        :param x: (tensor, name_fn) pair; returns the convolved tensor and
            the name of the Caffe layer emitted for it.
        """
        name = x[1]
        x = x[0]
        if self.pad=='SAME_LEFT':
            # Emulate pytorch-style padding with an explicit pad + VALID conv.
            x = tf.pad(x, [[0,0], [self.pad_value[0], self.pad_value[0]], [self.pad_value[1], self.pad_value[1]], [0,0]])
            pad = 'VALID'
        else:
            pad = self.pad
        out = tf.nn.conv2d(x, self.kernel, self.stride, pad, dilations=self.dilation_rate)
        if self.usebias:
            out = tf.nn.bias_add(out, self.bias)
        lname = self._write_caffe(name, out)
        return out, lname
class dwconv2D(KLayer):
    """
    Basic depth-wise convolution layer.

    Mirrored into Caffe as a grouped Convolution with group == inchannel.
    """
    def __init__(self, size, multiplier, stride=1,pad='SAME',dilation_rate=1,usebias=True,values=None):
        """
        :type size: int or list[int]
        :param size: Indicate the size of convolution kernel.
        :type multiplier: int
        :param multiplier: Multiplier of number of output channel. (outchannel = multiplier * inchannel)
        :type stride: int or list[int]
        :param stride: Stride number. Can be either integer or list of integers
        :type pad: String
        :param pad: Padding method, must be one of 'SAME', 'VALID', 'SAME_LEFT'. 'VALID' does not use auto-padding scheme. 'SAME' uses tensorflow-style auto-padding and 'SAME_LEFT' uses pytorch-style auto-padding.
        :type dilation_rate: int or list[int]
        :param dilation_rate: Dilation rate. Can be either integer or list of integers. When dilation_rate is larger than 1, stride should be 1.
        :type usebias: bool
        :param usebias: Whether to add bias term in this layer.
        :type values: list[np.array]
        :param values: If the param 'values' is set, the layer will be initialized with the list of numpy array.
        """
        super(dwconv2D, self).__init__()
        self.size = size
        self.multiplier = multiplier
        self.stride = stride
        self.usebias = usebias
        self.values = values
        self.dilation_rate = dilation_rate
        assert (pad in ['SAME','VALID','SAME_LEFT'])
        self.pad = pad
    def _parse_args(self, input_shape):
        # input_shape[0] is the tensor's shape (NHWC).
        inchannel = input_shape[0][-1]
        self.inchannel = inchannel
        self.outchn = inchannel * self.multiplier
        # parse args: TF depthwise kernels are [H, W, in, multiplier].
        if isinstance(self.size,list):
            self.size = [self.size[0],self.size[1],inchannel,self.multiplier]
        else:
            self.size = [self.size, self.size, inchannel, self.multiplier]
        # set stride
        if isinstance(self.stride,list):
            self.stride = [1,self.stride[0],self.stride[1],1]
        else:
            self.stride = [1,self.stride, self.stride, 1]
        # set dilation (depthwise_conv2d takes a 2-element rate, unlike conv2d)
        if isinstance(self.dilation_rate,list):
            self.dilation_rate = [self.dilation_rate[0],self.dilation_rate[1]]
        else:
            self.dilation_rate = [self.dilation_rate,self.dilation_rate]
    def build(self, input_shape):
        # Create kernel/bias variables, from `values` if provided.
        values = self.values
        self._parse_args(input_shape)
        if self.values is not None:
            self.kernel = self.add_variable('kernel', shape=self.size, initializer=tf.initializers.constant(values[0]))
        else:
            self.kernel = self.add_variable('kernel', shape=self.size, initializer=tf.initializers.VarianceScaling(scale=2.0, mode='fan_out', distribution='untruncated_normal'))
        if self.usebias:
            if self.values is not None:
                self.bias = self.add_variable('bias', shape=[self.outchn], initializer=tf.initializers.constant(values[1]))
            else:
                self.bias = self.add_variable('bias', shape=[self.outchn], initializer=tf.initializers.constant(0.0))
        if self.pad == 'SAME_LEFT':
            self.pad_value = [self.size[0]//2, self.size[1]//2]
    def _write_caffe(self, name, out):
        """Append a grouped Caffe Convolution layer for this depthwise op
        and stash its weights; returns the emitted layer's name."""
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        layer_name = 'conv%d'%layer_counter
        stride = self.stride[1]
        # Same pad/padshift scheme as conv2D for strided ops.
        if stride==1:
            pad = self.size[0]//2
        else:
            pad = self.size[0]//2 + 1
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "Convolution"\n'
        caffe_string += ' bottom: "%s"\n'%name()
        caffe_string += ' top: "%s"\n'%layer_name
        caffe_string += ' convolution_param{\n'
        caffe_string += '  num_output: %d\n'%self.outchn
        caffe_string += '  bias_term: %s\n'%('true' if self.usebias else 'false')
        caffe_string += '  group: %d\n'%self.inchannel
        caffe_string += '  stride: %d\n'%stride
        caffe_string += '  pad_h: %d\n'%pad
        caffe_string += '  pad_w: %d\n'%pad
        caffe_string += '  kernel_h: %d\n'%(self.size[0])
        caffe_string += '  kernel_w: %d\n'%(self.size[1])
        caffe_string += ' }\n}\n'
        params_dict[layer_name] = {}
        # Stored under 'dwkernel' so the loader can apply the depthwise
        # layout conversion -- TODO confirm in the loader.
        params_dict[layer_name]['dwkernel'] = self.kernel.numpy()
        if self.usebias:
            params_dict[layer_name]['bias'] = self.bias.numpy()
        if stride>1:
            layer_name = pad_correction(out, layer_name)
        layer_counter += 1
        return helper.LayerName(layer_name)
    def call(self, x):
        """
        :param x: (tensor, name_fn) pair; returns the convolved tensor and
            the name of the Caffe layer emitted for it.
        """
        name = x[1]
        x = x[0]
        if self.pad=='SAME_LEFT':
            x = tf.pad(x, [[0,0], [self.pad_value[0], self.pad_value[0]], [self.pad_value[1], self.pad_value[1]], [0,0]])
            pad = 'VALID'
        else:
            pad = self.pad
        out = tf.nn.depthwise_conv2d(x, self.kernel, self.stride, pad, dilations=self.dilation_rate)
        if self.usebias:
            out = tf.nn.bias_add(out, self.bias)
        lname = self._write_caffe(name, out)
        return out, lname
class globalAvgpoolLayer(KLayer):
    """
    Basic global average pooling layer.

    Reduces all spatial dims to 1 (keepdims) and emits a Caffe AVE Pooling
    layer whose kernel covers the whole feature map.
    """
    def __init__(self):
        super(globalAvgpoolLayer, self).__init__()
    def build(self, input_shape):
        # input_shape[0] is the tensor's shape; [1] is the LayerName tag.
        self.num_dim = len(input_shape[0])
        # Kernel size for the Caffe layer; assumes square spatial dims
        # (H == W) -- TODO confirm for non-square inputs.
        self.ksize = input_shape[0][1]
    def _write_caffe(self, name):
        """Append a Caffe AVE Pooling layer; returns its name."""
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        layer_name = 'gavgpool%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "Pooling"\n'
        caffe_string += ' bottom:"%s"\n'%name()
        caffe_string += ' top: "%s"\n'%layer_name
        caffe_string += ' pooling_param{\n  pool:AVE\n  kernel_size:%d\n }\n'%self.ksize
        caffe_string += '}\n'
        # Bug fix: advance the shared counter like every other layer writer
        # in this file does; without this, two consecutive global-pool
        # layers would both be named 'gavgpool<N>' and collide in the
        # prototxt.
        layer_counter += 1
        return helper.LayerName(layer_name)
    def call(self, x):
        """
        :param x: (tensor, name_fn) pair; returns the pooled tensor
            (spatial dims kept with size 1) and the emitted layer's name.
        """
        name = x[1]
        x = x[0]
        if self.num_dim==3:
            res = tf.reduce_mean(x, axis=1, keepdims=True)
        elif self.num_dim==4:
            res = tf.reduce_mean(x, axis=[1,2], keepdims=True)
        elif self.num_dim==5:
            res = tf.reduce_mean(x, axis=[1,2,3], keepdims=True)
        lname = self._write_caffe(name)
        return res , lname
class activation(KLayer):
    """
    Basic activation layer, selected by an integer `param` code.
    """
    def __init__(self, param, **kwargs):
        """
        Possible values:
        - model3.PARAM_RELU
        - model3.PARAM_LRELU
        - model3.PARAM_ELU
        - model3.PARAM_TANH
        - model3.PARAM_MFM
        - model3.PARAM_MFM_FC
        - model3.PARAM_SIGMOID
        - model3.PARAM_SWISH
        """
        super(activation, self).__init__()
        self.param = param
        self.kwargs = kwargs
    def _write_caffe(self, btm):
        # Emits in-place (bottom == top) activation layers.
        # NOTE(review): only params 0 (ReLU), 1 (PReLU) and 6 (Sigmoid) get
        # a `type` field; any other param writes a layer block with no type,
        # which looks like invalid prototxt -- confirm those params are
        # never exported.
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        layer_name = 'actv%d_%d'%(layer_counter, self.param)
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        if self.param == 0:
            caffe_string += ' type: "ReLU"\n'
        elif self.param == 1:
            caffe_string += ' type: "PReLU"\n'
            # Fixed slope matching the default leaky value used in call().
            params_dict[layer_name] = {}
            params_dict[layer_name]['gamma'] = 0.2
        elif self.param == 6:
            caffe_string += ' type: "Sigmoid"\n'
        caffe_string += ' bottom: "%s"\n'%btm()
        caffe_string += ' top: "%s"\n'%btm()
        caffe_string += '}\n'
        layer_counter += 1
        return btm
    def call(self, x):
        """
        :param x: (tensor, name_fn) pair; returns the activated tensor and
            the (unchanged, in-place) layer name.
        """
        name = x[1]
        x = x[0]
        if self.param == 0:
            res = tf.nn.relu(x)
        elif self.param == 1:
            # Leaky ReLU; slope configurable via kwargs['leaky'].
            if 'leaky' in self.kwargs:
                leaky = self.kwargs['leaky']
            else:
                leaky = 0.2
            res = tf.maximum(x,x*leaky)
        elif self.param == 2:
            res = tf.nn.elu(x)
        elif self.param == 3:
            res = tf.tanh(x)
        elif self.param == 4:
            # Max-Feature-Map over channel halves (conv variant).
            shape = x.get_shape().as_list()
            res = tf.reshape(x,[-1,shape[1],shape[2],2,shape[-1]//2]) # potential bug in conv_net
            res = tf.reduce_max(res,axis=[3])
        elif self.param == 5:
            # Max-Feature-Map (fully-connected variant).
            shape = x.get_shape().as_list()
            res = tf.reduce_max(tf.reshape(x,[-1,2,shape[-1]//2]),axis=[1])
        elif self.param == 6:
            res = tf.sigmoid(x)
        elif self.param == 7:
            # res = tf.nn.swish(x)
            # res = tf.sigmoid(x) * x
            # NOTE(review): `swish` is not defined in this module's visible
            # scope -- confirm it is imported/defined elsewhere in the file.
            res = swish(x)
        else:
            # Unknown code: identity.
            res = x
        lname = self._write_caffe(name)
        return res, lname
class fcLayer(KLayer):
    """
    Basic fully connected layer, mirrored into Caffe as InnerProduct.
    """
    def __init__(self, outsize, usebias=True, values=None, norm=False, map_shape=None):
        """
        :type outsize: int
        :param outsize: Number of output channels
        :type usebias: bool
        :param usebias: Whether to add bias term in this layer.
        :type values: list[np.array]
        :param values: If the param 'values' is set, the layer will be initialized with the list of numpy array.
        :type norm: bool (default=False)
        :param norm: Whether to normalize the kernel (along axis 0) before matrix multiplication
        :type map_shape: list (default=None)
        :param map_shape: If shape is set, weight will be re-shaped to fit NCHW format
        """
        super(fcLayer, self).__init__()
        self.outsize = outsize
        self.usebias = usebias
        self.values = values
        self.norm = norm
        self.map_shape = map_shape
    def _parse_args(self, input_shape):
        # set size
        insize = input_shape[0][-1]
        self.size = [insize, self.outsize]
    def build(self, input_shape):
        # Create kernel/bias variables, from `values` if provided.
        values = self.values
        self._parse_args(input_shape)
        if self.values is not None:
            self.kernel = self.add_variable('kernel', shape=self.size, initializer=tf.initializers.constant(values[0]))
        else:
            self.kernel = self.add_variable('kernel', shape=self.size, initializer=tf.initializers.GlorotUniform())
        if self.usebias:
            if self.values is not None:
                self.bias = self.add_variable('bias', shape=[self.outsize], initializer=tf.initializers.constant(values[1]))
            else:
                self.bias = self.add_variable('bias', shape=[self.outsize], initializer=tf.initializers.constant(0.0))
    def _write_caffe(self, name):
        """Append a Caffe InnerProduct layer and stash its weights;
        returns the emitted layer's name."""
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        layer_name = 'fc%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "InnerProduct"\n'
        caffe_string += ' bottom: "%s"\n'%name()
        caffe_string += ' top: "%s"\n'%layer_name
        caffe_string += ' inner_product_param{\n'
        caffe_string += '  num_output: %d\n'%self.outsize
        caffe_string += '  bias_term: %s\n'%('true' if self.usebias else 'false')
        caffe_string += ' }\n}\n'
        params_dict[layer_name] = {}
        if self.map_shape is None:
            params_dict[layer_name]['fckernel'] = self.kernel.numpy()
        else:
            # The fc input was flattened from an NHWC feature map; permute
            # the rows to the NCHW flattening order Caffe expects.
            transpose_w = self.kernel.numpy()
            transpose_w = np.reshape(transpose_w, [self.map_shape[0], self.map_shape[1], self.map_shape[2], self.outsize])
            transpose_w = np.transpose(transpose_w, [2,1,0,3])
            transpose_w = np.reshape(transpose_w, [-1, self.outsize])
            params_dict[layer_name]['fckernel'] = transpose_w
        if self.usebias:
            params_dict[layer_name]['bias'] = self.bias.numpy()
        layer_counter += 1
        return helper.LayerName(layer_name)
    def call(self, x):
        """
        :param x: (tensor, name_fn) pair; returns the projected tensor and
            the emitted layer's name.
        """
        name = x[1]
        x = x[0]
        if self.norm:
            # Column-normalised kernel (e.g. for cosine-similarity heads).
            k = tf.nn.l2_normalize(self.kernel, axis=0)
        else:
            k = self.kernel
        res = tf.matmul(x, k)
        if self.usebias:
            res = tf.nn.bias_add(res, self.bias)
        lname = self._write_caffe(name)
        return res, lname
class batch_norm(KLayer):
    """
    Basic batch normalization layer.

    Mirrored into Caffe as a BatchNorm layer followed by an in-place Scale
    layer (Caffe splits the affine part out of BatchNorm).
    """
    def __init__(self, decay=1e-2, epsilon=1e-5, is_training=None, values=None):
        """
        :type decay: float
        :param decay: Decay rate.
        :type epsilon: float
        :param epsilon: Epsilon value to avoid 0 division.
        :type is_training: bool
        :param is_training: Define whether this layer is in training mode
        :type values: list[np.array]
        :param values: If the param 'values' is set, the layer will be initialized with the list of numpy array (mean, var, gamma, beta).
        """
        super(batch_norm, self).__init__()
        self.decay = decay
        self.epsilon = epsilon
        self.is_training = is_training
        self.values = values
    def build(self, input_shape):
        values = self.values
        # NOTE(review): other layers in this file use input_shape[0][-1]
        # for the (tensor, name) tuple input; confirm input_shape[-1] is
        # the intended channel count here.
        shape = input_shape[-1]
        if self.values is None:
            self.moving_average = self.add_variable('moving_average',[shape],initializer=tf.initializers.constant(0.0),trainable=False)
            self.variance = self.add_variable('variance',[shape],initializer=tf.initializers.constant(1.0),trainable=False)
            self.gamma = self.add_variable('gamma',[shape],initializer=tf.initializers.constant(1.0),trainable=True)
            self.beta = self.add_variable('beta',[shape],initializer=tf.initializers.constant(0.0),trainable=True)
        else:
            self.moving_average = self.add_variable('moving_average',[shape],initializer=tf.initializers.constant(self.values[0]),trainable=False)
            self.variance = self.add_variable('variance',[shape],initializer=tf.initializers.constant(values[1]),trainable=False)
            self.gamma = self.add_variable('gamma',[shape],initializer=tf.initializers.constant(values[2]),trainable=True)
            self.beta = self.add_variable('beta',[shape],initializer=tf.initializers.constant(values[3]),trainable=True)
    def update(self,variable,value):
        # Exponential moving average: variable -= decay * (variable - value)
        delta = (variable - value) * self.decay
        variable.assign_sub(delta)
    def _write_caffe(self, btm):
        """Append in-place Caffe BatchNorm + Scale layers and stash the
        running stats / affine params; returns the (unchanged) bottom name."""
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        layer_name = 'bn%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "BatchNorm"\n'
        caffe_string += ' bottom: "%s"\n'%btm()
        caffe_string += ' top: "%s"\n'%btm()
        caffe_string += ' batch_norm_param{\n  use_global_stats:true\n  eps:1e-5\n }\n'
        caffe_string += '}\n'
        params_dict[layer_name] = {}
        params_dict[layer_name]['mean'] = self.moving_average.numpy()
        params_dict[layer_name]['var'] = self.variance.numpy()
        # Caffe's BatchNorm third blob is a scale factor the stats are
        # divided by; 1.0 means the stats are used as-is.
        params_dict[layer_name]['scale'] = 1.
        layer_name = 'scale%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "Scale"\n'
        caffe_string += ' bottom: "%s"\n'%btm()
        caffe_string += ' top: "%s"\n'%btm()
        caffe_string += ' scale_param{\n  bias_term:true\n }\n'
        caffe_string += '}\n'
        params_dict[layer_name] = {}
        params_dict[layer_name]['scale'] = self.gamma.numpy()
        params_dict[layer_name]['bias'] = self.beta.numpy()
        # Bug fix: advance the shared counter like every other layer writer
        # does; without this, two consecutive batch_norm layers would both
        # emit 'bn<N>'/'scale<N>' and collide in the prototxt.
        layer_counter += 1
        return btm
    def call(self, x):
        """
        :param x: (tensor, name_fn) pair; returns the normalized tensor and
            the (unchanged, in-place) layer name.
        """
        name = x[1]
        x = x[0]
        if self.is_training is None:
            is_training = bool(tf.keras.backend.learning_phase())
        else:
            is_training = self.is_training
        # is_training = True
        # print(is_training, time.time())
        # fused_batch_norm only takes 4D NHWC input, so temporarily reshape
        # 2D/3D/5D inputs to 4D and restore the shape afterwards.
        inp_shape = x.get_shape().as_list()
        inp_dim_num = len(inp_shape)
        if inp_dim_num==3:
            x = tf.expand_dims(x, axis=1)
        elif inp_dim_num==2:
            x = tf.expand_dims(x, axis=1)
            x = tf.expand_dims(x, axis=1)
        elif inp_dim_num==5:
            x = tf.reshape(x, [inp_shape[0], inp_shape[1], inp_shape[2]*inp_shape[3], inp_shape[4]])
        if is_training:
            res, mean, var = tf.compat.v1.nn.fused_batch_norm(x, self.gamma, self.beta, None, None, self.epsilon, is_training=is_training)
            self.update(self.moving_average, mean)
            self.update(self.variance, var)
        else:
            res, mean, var = tf.compat.v1.nn.fused_batch_norm(x, self.gamma, self.beta, self.moving_average, self.variance, self.epsilon, is_training=is_training)
        if inp_dim_num==3:
            res = tf.squeeze(res , axis=1)
        elif inp_dim_num==2:
            res = tf.squeeze(res, axis=[1,2])
        elif inp_dim_num==5:
            res = tf.reshape(res, inp_shape)
        lname = self._write_caffe(name)
        return res, lname
class flatten(KLayer):
    """
    Basic flatten layer: collapses all non-batch dims to one, mirrored
    into Caffe as a Flatten layer.
    """
    def __init__(self):
        super(flatten, self).__init__()
    def build(self, input_shape):
        # Overwritten again in call() from the live tensor shape.
        self.shape = input_shape
    def _write_caffe(self, name):
        """Append a Caffe Flatten layer; returns its name."""
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        layer_name = 'flatten%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "Flatten"\n'
        caffe_string += ' bottom: "%s"\n'%name()
        caffe_string += ' top: "%s"\n'%layer_name
        # caffe_string += ' crop_param{\n  offset:%d\n  offset:%d\n }\n}\n'%(1,1)
        caffe_string += '}\n'
        layer_counter += 1
        return helper.LayerName(layer_name)
    def call(self, x):
        """
        :param x: (tensor, name_fn) pair; returns the tensor reshaped to
            [-1, prod(other dims)] and the emitted layer's name.
        """
        name = x[1]
        x = x[0]
        self.shape = x.get_shape().as_list()
        num = 1
        for k in self.shape[1:]:
            num *= k
        res = tf.reshape(x, [-1, num])
        lname = self._write_caffe(name)
        return res , lname
class NNUpSample2D(KLayer):
    """Nearest-neighbour 2D upsampling by an integer `factor`, implemented
    as a transposed convolution with a fixed one-hot kernel so it maps to a
    Caffe Deconvolution layer."""
    def __init__(self, factor):
        super(NNUpSample2D, self).__init__()
        self.factor = factor
    def _get_weights(self):
        # factor x factor kernel that is 1 everywhere on the diagonal
        # channel pair (i -> i): every output pixel in the factor-sized
        # block copies the source pixel.
        w = np.zeros([self.factor, self.factor, self.chn, self.chn])
        w = np.float32(w)
        for i in range(self.chn):
            w[:,:,i,i] = 1
        return w
    def build(self, input_shape):
        # input_shape[0] is the tensor's shape (NHWC).
        self.chn = input_shape[0][-1]
    def _write_caffe(self, name):
        """Append a bias-free Caffe Deconvolution layer and stash the fixed
        upsampling kernel; returns the emitted layer's name."""
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        layer_name = 'nnup%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "Deconvolution"\n'
        caffe_string += ' bottom: "%s"\n'%name()
        caffe_string += ' top: "%s"\n'%layer_name
        caffe_string += ' convolution_param{\n'
        caffe_string += '  num_output: %d\n'%self.chn
        caffe_string += '  bias_term: %s\n'%('false')
        caffe_string += '  stride: %d\n'%self.factor
        caffe_string += '  kernel_h: %d\n'%(self.factor)
        caffe_string += '  kernel_w: %d\n'%(self.factor)
        caffe_string += ' }\n}\n'
        params_dict[layer_name] = {}
        params_dict[layer_name]['kernel'] = self._get_weights()
        layer_counter += 1
        return helper.LayerName(layer_name)
    def call(self, x):
        # NOTE(review): outshape uses shape[0] directly, so a dynamic
        # (None) batch dimension would break here -- confirm inputs always
        # have a static batch size.
        name = x[1]
        x = x[0]
        shape = x.get_shape().as_list()
        w = self._get_weights()
        outshape = [shape[0], shape[1]*self.factor, shape[2]*self.factor, self.chn]
        stride = [1, self.factor, self.factor, 1]
        x = tf.nn.conv2d_transpose(x, w, outshape, stride)
        lname = self._write_caffe(name)
        return x, lname
class BroadcastMUL(KLayer):
    """Element-wise product of a [N,1,1,C] tensor (e.g. an SE gate) with a
    full [N,H,W,C] feature map.  In Caffe the broadcast is materialised by
    an all-ones Deconvolution that tiles the 1x1 map up to HxW, followed by
    an Eltwise PROD."""
    def __init__(self):
        super(BroadcastMUL, self).__init__()
    def _write_caffe(self, names, tiles, outchn):
        """Append the tiling Deconvolution + Eltwise PROD layers;
        `tiles` is the spatial size of the full map, `outchn` its channels.
        Returns the product layer's name."""
        global caffe_string, layer_counter
        if not 'caffe_string' in globals():
            caffe_string = ''
        if not 'layer_counter' in globals():
            layer_counter = 0
        # manual tiling layers to match the size
        # (earlier Tile-layer implementation kept for reference)
        # layer_name = 'tile_0_%d'%layer_counter
        # caffe_string += 'layer{\n'
        # caffe_string += ' name: "%s"\n'%layer_name
        # caffe_string += ' type: "Tile"\n'
        # caffe_string += ' bottom:"%s"\n'%names[0]()
        # caffe_string += ' top: "%s"\n'%layer_name
        # caffe_string += ' tile_param{\n  axis:2\n  tiles:%d\n }\n'%tiles
        # caffe_string += '}\n'
        # layer_name = 'tile_1_%d'%layer_counter
        # caffe_string += 'layer{\n'
        # caffe_string += ' name: "%s"\n'%layer_name
        # caffe_string += ' type: "Tile"\n'
        # caffe_string += ' bottom:"tile_0_%d"\n'%layer_counter
        # caffe_string += ' top: "%s"\n'%layer_name
        # caffe_string += ' tile_param{\n  axis:3\n  tiles:%d\n }\n'%tiles
        # caffe_string += '}\n'
        layer_name = 'tile_0_%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "Deconvolution"\n'
        caffe_string += ' bottom: "%s"\n'%names[0]()
        caffe_string += ' top: "%s"\n'%layer_name
        caffe_string += ' convolution_param{\n'
        caffe_string += '  num_output: %d\n'%outchn
        caffe_string += '  bias_term: %s\n'%('false')
        caffe_string += '  group: %d\n'%outchn
        caffe_string += '  stride: 1\n'
        caffe_string += '  pad_h: 0\n'
        caffe_string += '  pad_w: 0\n'
        caffe_string += '  kernel_h: %d\n'%tiles
        caffe_string += '  kernel_w: %d\n'%tiles
        caffe_string += ' }\n}\n'
        params_dict[layer_name] = {}
        # All-ones depthwise deconv kernel == nearest-neighbour tiling of
        # the 1x1 input across a tiles x tiles output.
        params_dict[layer_name]['dwkernel'] = np.ones([tiles, tiles, outchn, 1]).astype(np.float32)
        # do multiplication
        layer_name = 'mul%d'%layer_counter
        caffe_string += 'layer{\n'
        caffe_string += ' name: "%s"\n'%layer_name
        caffe_string += ' type: "Eltwise"\n'
        caffe_string += ' bottom:"tile_0_%d"\n'%layer_counter
        caffe_string += ' bottom:"%s"\n'%names[1]()
        caffe_string += ' top: "%s"\n'%layer_name
        caffe_string += ' eltwise_param{\n  operation:PROD\n }\n'
        caffe_string += '}\n'
        layer_counter += 1
        return helper.LayerName(layer_name)
    def call(self, x):
        # x is [(gate_tensor, name), (feature_tensor, name)]; TF broadcast
        # handles the spatial expansion that Caffe needs explicit layers for.
        names = [i[1] for i in x]
        xs = [i[0] for i in x]
        out = xs[0]*xs[1]
        lname = self._write_caffe(names, xs[1].shape[1], xs[1].shape[-1])
        return out, lname
class SUM(KLayer):
	"""Elementwise-sum layer: adds all inputs and records a caffe Eltwise(SUM) layer."""
	def __init__(self):
		super(SUM, self).__init__()
	def _write_caffe(self, names):
		"""Append an Eltwise(SUM) layer over *names* to the global prototxt buffer."""
		global caffe_string, layer_counter
		# Lazily create the module-level prototxt buffer and layer counter.
		if 'caffe_string' not in globals():
			caffe_string = ''
		if 'layer_counter' not in globals():
			layer_counter = 0
		layer_name = 'add%d' % layer_counter
		parts = ['layer{\n', ' name: "%s"\n' % layer_name, ' type: "Eltwise"\n']
		parts.extend(' bottom:"%s"\n' % n() for n in names)
		parts.append(' top: "%s"\n' % layer_name)
		parts.append(' eltwise_param{\n operation:SUM\n }\n')
		parts.append('}\n')
		caffe_string += ''.join(parts)
		layer_counter += 1
		return helper.LayerName(layer_name)
	def call(self, x):
		"""x: list of (tensor, LayerName) pairs; returns (sum tensor, new LayerName)."""
		layer_names = [pair[1] for pair in x]
		tensors = [pair[0] for pair in x]
		lname = self._write_caffe(layer_names)
		return sum(tensors), lname
class CONCAT(KLayer):
	"""Concatenation layer: concatenates inputs and records a caffe Concat layer."""
	def __init__(self):
		super(CONCAT, self).__init__()
	def _write_caffe(self, names):
		"""Append a Concat layer over *names* to the global prototxt buffer."""
		global caffe_string, layer_counter
		# Lazily create the module-level prototxt buffer and layer counter.
		if 'caffe_string' not in globals():
			caffe_string = ''
		if 'layer_counter' not in globals():
			layer_counter = 0
		layer_name = 'concat%d' % layer_counter
		parts = ['layer{\n', ' name: "%s"\n' % layer_name, ' type: "Concat"\n']
		parts.extend(' bottom:"%s"\n' % n() for n in names)
		parts.append(' top: "%s"\n' % layer_name)
		# caffe tensors are NCHW, so axis 1 is the channel axis.
		parts.append(' concat_param{\n axis:1\n }\n')
		parts.append('}\n')
		caffe_string += ''.join(parts)
		layer_counter += 1
		return helper.LayerName(layer_name)
	def call(self, x):
		"""x: list of (tensor, LayerName) pairs; returns (concat on last axis, LayerName)."""
		layer_names = [pair[1] for pair in x]
		tensors = [pair[0] for pair in x]
		lname = self._write_caffe(layer_names)
		return tf.concat(tensors, axis=-1), lname
| StarcoderdataPython |
1631261 | import json
import logging
import redis
from mercury.common.exceptions import MercuryClientException
from mercury.common.transport import SimpleRouterReqService
from mercury.backend.queue_service.options import parse_options
log = logging.getLogger(__name__)
class QueueService(SimpleRouterReqService):
    """ Simple backend for queuing tasks """
    REQUIRED_TASK_KEYS = ['host', 'port', 'task_id', 'job_id', 'method', 'args',
                          'kwargs']
    def __init__(self, bind_address, redis_client, queue_name):
        """
        :param bind_address: address the router service binds to
        :param redis_client: connected redis.Redis instance
        :param queue_name: name of the redis list used as the task queue
        """
        super(QueueService, self).__init__(bind_address)
        self.redis_client = redis_client
        self.queue_name = queue_name
    def enqueue_task(self, task):
        """Validate *task* and push it, JSON-encoded, onto the redis queue.

        :param task: dict containing all REQUIRED_TASK_KEYS
        """
        self.validate_required(self.REQUIRED_TASK_KEYS, task)
        log.debug('Enqueuing task: {job_id} / {task_id}'.format(**task))
        serialized = json.dumps(task)
        self.redis_client.lpush(self.queue_name, serialized)
    def process(self, message):
        """Handle one request; only the 'enqueue_task' endpoint is supported.

        :param message: dict with 'endpoint' and 'args' keys
        :return: status dict
        """
        endpoint = message['endpoint']
        if endpoint != 'enqueue_task':
            raise MercuryClientException('Unsupported endpoint')
        self.enqueue_task(message['args'][0])
        return dict(error=False, message='Done')
def configure_logging(config):
    """ Configure logging for application

    :param config: A namespace provided from MercuryConfiguration.parse_args
    """
    level = logging.getLevelName(config.logging.level)
    logging.basicConfig(level=level, format=config.logging.format)
def main():
    """ Service entry point """
    config = parse_options()
    configure_logging(config)
    backend_cfg = config.backend
    # Redis holds the shared task queue consumed by workers.
    redis_client = redis.Redis(host=backend_cfg.redis.host,
                               port=backend_cfg.redis.port)
    server = QueueService(backend_cfg.queue_service.bind_address,
                          redis_client,
                          backend_cfg.redis.queue)
    server.start()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3347055 | <reponame>d9e7381f/onlinejudge-2.0<gh_stars>0
from django.apps import AppConfig
class DelegationConfig(AppConfig):
    """Django application configuration for the ``delegation`` app."""
    name = 'delegation'
| StarcoderdataPython |
92335 | import typing as typ
import httpx
from chaban.config import global_settings, settings
from chaban.core.exceptions import HttpMethodNotAllowed, TelegramAPIError
from chaban.handlers.base import mh_registry
from chaban.utils import MetaSingleton
from .telegram_methods import TelegramMethodsMixin
class TelegramBot(TelegramMethodsMixin, metaclass=MetaSingleton):
    """
    Main telegram bot class (singleton via ``MetaSingleton``).

    All methods related to telegram api are defined in ``TelegramMethodsMixin``.

    On init, get ``TELEGRAM_TOKEN`` from settings.
    Settings are getting that token from env.
    """

    # Whitelist of HTTP verbs that ``request`` may use.
    _allowed_http_methods = global_settings.TELEGRAM_ALLOWED_HTTP_METHODS

    def __init__(self):
        # bot don't need the token as a separate constant, token is only used as part of
        # the telegram api endpoint url
        self._endpoint = "https://api.telegram.org/bot{}/".format(
            settings.TELEGRAM_TOKEN
        )

    def _build_url(self, method_name: str) -> str:
        # Join the endpoint with the API method name, tolerating a leading "/".
        return self._endpoint + method_name.lstrip("/")

    def request(
        self, method_name: str, http_method: str = "get", **kwargs
    ) -> typ.Dict[str, typ.Any]:
        """
        Perform an HTTP :param http_method: request and pass the kwargs as params.
        Returns a JSON.

        Raises ``HttpMethodNotAllowed`` when the verb is not whitelisted.
        """
        http_method = http_method.lower()
        if http_method not in self._allowed_http_methods:
            raise HttpMethodNotAllowed
        return httpx.request(
            http_method, self._build_url(method_name), params=kwargs
        ).json()

    def start_polling(self):
        # Blocking loop: hand every incoming message to a registered handler.
        for message in self._poll_updates():
            mh_registry.get_handler_and_handle(message)

    def _poll_updates(self) -> typ.Iterator[typ.Dict[str, typ.Any]]:
        """
        Main loop.
        Getting updates from telegram, handling offset, yielding each update's message

        Raises ``TelegramAPIError`` when telegram answers with ``ok: false``.
        """
        # set offset to 0
        offset = 0
        # start loop
        while True:
            # get json response from telegram
            resp = self.get_updates(offset=offset)
            # if not ok, raise the error
            if not resp["ok"]:
                raise TelegramAPIError("Response JSON: {}".format(resp))
            # iterate through updates from resp json
            for update in resp["result"]:
                # advance offset past this update so it is not re-delivered
                offset = max(offset, update["update_id"] + 1)
                # yield the message (falls back to the edited message if absent)
                yield update.get("message", update.get("edited_message"))
| StarcoderdataPython |
1753208 | <reponame>doubleukay/bxgateway
class GatewayMessageType:
    """Wire-level message type identifiers exchanged between gateway peers."""
    HELLO = b"gw_hello"
    BLOCK_RECEIVED = b"blockrecv"
    BLOCK_PROPAGATION_REQUEST = b"blockprop"
    # Sync messages types are currently unused. See `blockchain_sync_service.py`.
    SYNC_REQUEST = b"syncreq"
    SYNC_RESPONSE = b"syncres"
    REQUEST_TX_STREAM = b"rqtx"
    CONFIRMED_TX = b"cnfrmtx"
| StarcoderdataPython |
118366 | <gh_stars>1-10
from typing import Tuple, List
from dataclasses import dataclass
from .. import inference_errors as ierr
from .. import type_system as ts
from .. import context
from ..code_blocks import Primitive
from ..type_engine import TypingContext
from . import func_methods, concrete_methods, add_method_to_list
# ---------------------------------------------------------------------
mapping_int_binary = {
"__eq__": (False, "eq"),
"__ne__": (False, "ne"),
'__gt__': (False, "sgt"),
'__lt__': (False, "slt"),
'__ge__': (False, "sge"),
'__le__': (False, "sle"),
'__add__': (True, "add"),
'__sub__': (True, "sub"),
'__mul__': (True, "mul"),
'__div__': (True, "sdiv"),
'__mod__': (True, "srem"),
}
@add_method_to_list(func_methods)
def gen_int_type_ops(
tc: TypingContext,
name: str,
type_argument_types: Tuple[ts.Type],
argument_types: Tuple[ts.Type],
):
if name not in mapping_int_binary:
raise ierr.TypeGenError()
if len(type_argument_types) != 0:
raise ierr.TypeGenError()
if len(argument_types) != 2:
raise ierr.TypeGenError()
if not isinstance(argument_types[0], ts.IntType):
raise ierr.TypeGenError()
if not isinstance(argument_types[1], ts.IntType):
raise ierr.TypeGenError()
if argument_types[0].size != argument_types[1].size:
raise ierr.TypeGenError()
dname = tc.scope_man.new_func_name(f"dummy_func_{name}")
retty = argument_types[0] if mapping_int_binary[name][0] else ts.BoolType()
tc.code_blocks.append(IntTypeOpPrimitive(
dname,
name,
argument_types[0].size,
))
ft = ts.FunctionType(
dname,
retty,
do_not_copy_args = False,
)
return ft
@dataclass
class IntTypeOpPrimitive(Primitive):
mangled_name: str
op: str
size: int
def get_code(self):
def arithmetic(opname):
return [
f"define dso_local i{self.size} @{self.mangled_name}(i{self.size} %0, i{self.size} %1) {{",
f"\t%3 = {opname} nsw i{self.size} %0, %1",
f"\tret i{self.size} %3",
f"}}",
]
def comp(opname):
return [
f"define dso_local i1 @{self.mangled_name}(i{self.size} %0, i{self.size} %1) {{",
f"\t%3 = icmp {opname} i{self.size} %0, %1",
f"\tret i1 %3",
f"}}",
]
a,opname = mapping_int_binary[self.op]
if a:
return arithmetic(opname)
else:
return comp(opname)
# ---------------------------------------------------------------------
# Maps char comparison dunder names to (yields_char_result, llvm_cond).
# Only equality/inequality are defined for chars; both produce an i1 bool.
mapping_char_binary = {
    "__eq__": (False, "eq"),
    "__ne__": (False, "ne"),
}
@add_method_to_list(func_methods)
def gen_char_type_ops(
    tc: TypingContext,
    name: str,
    type_argument_types: Tuple[ts.Type],
    argument_types: Tuple[ts.Type],
):
    """Generate a builtin comparison operator for two char operands.

    Raises ``ierr.TypeGenError`` whenever this generator does not apply,
    so the caller can fall through to other candidates.
    """
    if name not in mapping_char_binary:
        raise ierr.TypeGenError()
    if len(type_argument_types) != 0:
        raise ierr.TypeGenError()
    if len(argument_types) != 2:
        raise ierr.TypeGenError()
    if not isinstance(argument_types[0], ts.CharType):
        raise ierr.TypeGenError()
    if not isinstance(argument_types[1], ts.CharType):
        raise ierr.TypeGenError()
    dname = tc.scope_man.new_func_name(f"dummy_func_{name}")
    # Char comparisons always produce bool.
    retty = ts.BoolType()
    tc.code_blocks.append(CharTypeOpPrimitive(
        dname,
        name,
    ))
    ft = ts.FunctionType(
        dname,
        retty,
        do_not_copy_args = False,
    )
    return ft
@dataclass
class CharTypeOpPrimitive(Primitive):
    # Emits the LLVM IR body of one char (i8) comparison.
    mangled_name: str
    op: str
    def get_code(self):
        def comp(opname):
            return [
                f"define dso_local i1 @{self.mangled_name}(i8 %0, i8 %1) {{",
                f"\t%3 = icmp {opname} i8 %0, %1",
                f"\tret i1 %3",
                f"}}",
            ]
        # BUG FIX: this previously indexed mapping_int_binary, which only
        # worked by coincidence because "__eq__"/"__ne__" exist in both
        # tables; char primitives must use the char mapping.
        a, opname = mapping_char_binary[self.op]
        return comp(opname)
# ---------------------------------------------------------------------
# Maps boolean binary dunder names to their LLVM i1 bitwise opcodes.
mapping_bool_binary = {
    "__and__": "and",
    "__or__": "or",
}
@add_method_to_list(func_methods)
def gen_bool_type_ops(
    tc: TypingContext,
    name: str,
    type_argument_types: Tuple[ts.Type],
    argument_types: Tuple[ts.Type],
):
    """Generate a builtin and/or operator for two bool operands.

    Raises ``ierr.TypeGenError`` whenever this generator does not apply,
    so the caller can fall through to other candidates.
    """
    if name not in mapping_bool_binary:
        raise ierr.TypeGenError()
    if len(type_argument_types) != 0:
        raise ierr.TypeGenError()
    if len(argument_types) != 2:
        raise ierr.TypeGenError()
    if not isinstance(argument_types[0], ts.BoolType):
        raise ierr.TypeGenError()
    if not isinstance(argument_types[1], ts.BoolType):
        raise ierr.TypeGenError()
    dname = tc.scope_man.new_func_name(f"dummy_func_{name}")
    tc.code_blocks.append(BoolTypeOpPrimitive(
        dname,
        name,
    ))
    ft = ts.FunctionType(
        dname,
        ts.BoolType(),
        do_not_copy_args = False,
    )
    return ft
@dataclass
class BoolTypeOpPrimitive(Primitive):
    # Emits the LLVM IR body of one boolean (i1) bitwise and/or.
    mangled_name: str
    op: str
    def get_code(self):
        return [
            f"define dso_local i1 @{self.mangled_name}(i1 %0, i1 %1) {{",
            f"\t%3 = {mapping_bool_binary[self.op]} i1 %0, %1",
            f"\tret i1 %3",
            f"}}",
        ]
# ---------------------------------------------------------------------
@add_method_to_list(func_methods)
def gen_bool_type_not(
    tc: TypingContext,
    name: str,
    type_argument_types: Tuple[ts.Type],
    argument_types: Tuple[ts.Type],
):
    """Generate the builtin unary ``__not__`` operator for a bool operand.

    Raises ``ierr.TypeGenError`` whenever this generator does not apply,
    so the caller can fall through to other candidates.
    """
    if name != '__not__':
        raise ierr.TypeGenError()
    if len(type_argument_types) != 0:
        raise ierr.TypeGenError()
    if len(argument_types) != 1:
        raise ierr.TypeGenError()
    if not isinstance(argument_types[0], ts.BoolType):
        raise ierr.TypeGenError()
    dname = tc.scope_man.new_func_name(f"dummy_func_{name}")
    tc.code_blocks.append(BoolTypeNotPrimitive(
        dname,
    ))
    ft = ts.FunctionType(
        dname,
        ts.BoolType(),
        do_not_copy_args = False,
    )
    return ft
@dataclass
class BoolTypeNotPrimitive(Primitive):
    # Emits the LLVM IR body of boolean negation.
    mangled_name: str
    def get_code(self):
        # `add i1 1, %x` is addition modulo 2, i.e. XOR with 1 — logical NOT.
        return [
            f"define dso_local i1 @{self.mangled_name}(i1 %0) {{",
            f"\t%2 = add i1 1, %0",
            f"\tret i1 %2",
            f"}}",
        ]
# ---------------------------------------------------------------------
| StarcoderdataPython |
194016 | <filename>evalai/utils/challenges.py<gh_stars>10-100
import json
import requests
import sys
from bs4 import BeautifulSoup
from beautifultable import BeautifulTable
from click import echo, style
from datetime import datetime
from termcolor import colored
from evalai.utils.auth import get_request_header, get_host_url
from evalai.utils.common import (
clean_data,
convert_UTC_date_to_local,
validate_date_format,
validate_token,
)
from evalai.utils.config import EVALAI_ERROR_CODES
from evalai.utils.urls import URLS
requests.packages.urllib3.disable_warnings()
def pretty_print_challenge_data(challenges):
    """
    Pretty-print challenge summaries as a colored table.

    :param challenges: list of challenge dicts from the EvalAI API
    """
    table = BeautifulTable(max_width=200)
    table.column_headers = [
        "ID",
        "Title",
        "Short Description",
        "Creator",
        "Start Date",
        "End Date",
    ]
    # Most recently listed challenges (end of the list) are shown first.
    for challenge in reversed(challenges):
        row = [
            challenge["id"],
            challenge["title"],
            challenge["short_description"],
            challenge["creator"]["team_name"],
            convert_UTC_date_to_local(challenge["start_date"]),
            convert_UTC_date_to_local(challenge["end_date"]),
        ]
        colors = ['white', 'yellow', 'cyan', 'white', 'green', 'red']
        table.append_row(
            [colored(value, color) for value, color in zip(row, colors)]
        )
    echo(table, color='yes')
def display_challenges(url):
    """
    Function to fetch & display the challenge list based on API

    :param url: fully-qualified challenge-list endpoint to query
    """
    header = get_request_header()
    try:
        response = requests.get(url, headers=header)
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        # A 401 means the stored auth token is invalid/expired.
        if response.status_code == 401:
            validate_token(response.json())
        echo(err)
        sys.exit(1)
    except requests.exceptions.RequestException:
        echo(
            style(
                "\nCould not establish a connection to EvalAI."
                " Please check the Host URL.\n",
                bold=True,
                fg="red",
            )
        )
        sys.exit(1)
    response = response.json()
    challenges = response["results"]
    if len(challenges) != 0:
        pretty_print_challenge_data(challenges)
    else:
        echo(style("Sorry, no challenges found.", bold=True, fg="red"))
def display_all_challenge_list():
    """
    Displays the list of all challenges from the backend
    """
    display_challenges("{}{}".format(get_host_url(), URLS.challenge_list.value))


def display_past_challenge_list():
    """
    Displays the list of past challenges from the backend
    """
    display_challenges("{}{}".format(get_host_url(), URLS.past_challenge_list.value))
def display_ongoing_challenge_list():
    """
    Displays the list of ongoing challenges from the backend

    Fetches the full challenge list and filters it client-side.
    """
    url = "{}{}".format(get_host_url(), URLS.challenge_list.value)
    header = get_request_header()
    try:
        response = requests.get(url, headers=header)
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        # A 401 means the stored auth token is invalid/expired.
        if response.status_code == 401:
            validate_token(response.json())
        echo(err)
        sys.exit(1)
    except requests.exceptions.RequestException:
        echo(
            style(
                "\nCould not establish a connection to EvalAI."
                " Please check the Host URL.\n",
                bold=True,
                fg="red",
            )
        )
        sys.exit(1)
    response = response.json()
    challenges = response["results"]
    # Filter out past/unapproved/unpublished challenges.
    challenges = list(
        filter(
            lambda challenge: validate_date_format(challenge["end_date"])
            > datetime.now()
            and challenge["approved_by_admin"]
            and challenge["published"],
            challenges,
        )
    )
    if len(challenges) != 0:
        pretty_print_challenge_data(challenges)
    else:
        echo(style("Sorry, no challenges found.", bold=True, fg="red"))
def display_future_challenge_list():
    """
    Displays the list of future challenges from the backend
    """
    base_url = get_host_url()
    display_challenges(base_url + URLS.future_challenge_list.value)
def get_participant_or_host_teams(url):
    """
    Returns the participant or host teams corresponding to the user

    :param url: teams endpoint (participant or host variant)
    :return: list of team dicts from the API response
    """
    header = get_request_header()
    try:
        response = requests.get(url, headers=header)
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        # A 401 means the stored auth token is invalid/expired.
        if response.status_code == 401:
            validate_token(response.json())
        echo(err)
        sys.exit(1)
    except requests.exceptions.RequestException:
        echo(
            style(
                "\nCould not establish a connection to EvalAI."
                " Please check the Host URL.\n",
                bold=True,
                fg="red",
            )
        )
        sys.exit(1)
    response = response.json()
    return response["results"]
def get_participant_or_host_team_challenges(url, teams):
    """
    Returns the challenges corresponding to the participant or host teams

    :param url: challenge endpoint containing a "{}" placeholder for a team id
    :param teams: list of team dicts (each must have an "id" key)
    :return: flat list of challenge dicts across all teams
    """
    challenges = []
    for team in teams:
        header = get_request_header()
        try:
            # One API request per team; results are accumulated below.
            response = requests.get(url.format(team["id"]), headers=header)
            response.raise_for_status()
        except requests.exceptions.HTTPError as err:
            # A 401 means the stored auth token is invalid/expired.
            if response.status_code == 401:
                validate_token(response.json())
            echo(err)
            sys.exit(1)
        except requests.exceptions.RequestException:
            echo(
                style(
                    "\nCould not establish a connection to EvalAI."
                    " Please check the Host URL.\n",
                    bold=True,
                    fg="red",
                )
            )
            sys.exit(1)
        response = response.json()
        challenges += response["results"]
    return challenges
def display_participated_or_hosted_challenges(
    is_host=False, is_participant=False
):
    """
    Function to display the participated or hosted challenges by a user

    :param is_host: when True, show challenges hosted by the user's host teams
    :param is_participant: when True, show ongoing challenges the user's
        participant teams have entered
    """
    challenges = []
    if is_host:
        team_url = "{}{}".format(get_host_url(), URLS.host_teams.value)
        challenge_url = "{}{}".format(
            get_host_url(), URLS.host_challenges.value
        )
        teams = get_participant_or_host_teams(team_url)
        challenges = get_participant_or_host_team_challenges(
            challenge_url, teams
        )
        echo(style("\nHosted Challenges\n", bold=True))
        if len(challenges) != 0:
            pretty_print_challenge_data(challenges)
        else:
            echo(style("Sorry, no challenges found.", bold=True, fg="red"))
    if is_participant:
        team_url = "{}{}".format(get_host_url(), URLS.participant_teams.value)
        challenge_url = "{}{}".format(
            get_host_url(), URLS.participant_challenges.value
        )
        teams = get_participant_or_host_teams(team_url)
        challenges = get_participant_or_host_team_challenges(
            challenge_url, teams
        )
        if len(challenges) != 0:
            # Filter out past/unapproved/unpublished challenges.
            challenges = list(
                filter(
                    lambda challenge: validate_date_format(
                        challenge["end_date"]
                    )
                    > datetime.now()
                    and challenge["approved_by_admin"]
                    and challenge["published"],
                    challenges,
                )
            )
            if challenges:
                echo(style("\nParticipated Challenges\n", bold=True))
                pretty_print_challenge_data(challenges)
            else:
                echo(style("Sorry, no challenges found.", bold=True, fg="red"))
        else:
            echo(style("Sorry, no challenges found.", bold=True, fg="red"))
def pretty_print_challenge_details(challenge):
    """Render a single challenge's details as a one-row table.

    :param challenge: challenge dict from the EvalAI API
    """
    table = BeautifulTable(max_width=200)
    table.column_headers = [
        "Start Date",
        "End Date",
        "Description",
        "Submission Guidelines",
        "Evaluation Details",
        "Terms and Conditions",
    ]
    # Keep only the date portion of the local datetime strings.
    start_date = convert_UTC_date_to_local(challenge["start_date"]).split(" ")[0]
    end_date = convert_UTC_date_to_local(challenge["end_date"]).split(" ")[0]
    row = [start_date, end_date]
    for field in (
        "description",
        "submission_guidelines",
        "evaluation_details",
        "terms_and_conditions",
    ):
        row.append(clean_data(challenge[field]))
    table.append_row(row)
    echo(table)
def display_challenge_details(challenge):
    """
    Function to display challenge details.

    :param challenge: challenge id used to build the details endpoint
    """
    url = URLS.challenge_details.value
    url = "{}{}".format(get_host_url(), url)
    url = url.format(challenge)
    header = get_request_header()
    try:
        response = requests.get(url, headers=header)
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        # Known API error codes carry a JSON "error" payload worth showing.
        if response.status_code in EVALAI_ERROR_CODES:
            validate_token(response.json())
            echo(
                style(
                    "\nError: {}".format(response.json()["error"]),
                    fg="red",
                    bold=True,
                )
            )
            echo(
                style(
                    "\nUse `evalai challenges` to fetch the active challenges.\n",
                    fg="red",
                    bold=True,
                )
            )
        else:
            echo(err)
        sys.exit(1)
    except requests.exceptions.RequestException:
        echo(
            style(
                "\nCould not establish a connection to EvalAI."
                " Please check the Host URL.\n",
                bold=True,
                fg="red",
            )
        )
        sys.exit(1)
    response = response.json()
    pretty_print_challenge_details(response)
def pretty_print_all_challenge_phases(phases):
    """Print every challenge phase of a challenge in tabular form.

    :param phases: list of phase dicts from the EvalAI API
    """
    table = BeautifulTable(max_width=150)
    table.column_headers = [
        "Phase ID",
        "Phase Name",
        "Challenge ID",
        "Description",
    ]
    for phase in phases:
        row = [phase["id"], phase["name"], phase["challenge"]]
        row.append(clean_data(phase["description"]))
        table.append_row(row)
    echo(table)
def display_challenge_phase_list(challenge_id):
    """
    Function to display all challenge phases for a particular challenge.

    :param challenge_id: challenge id used to build the phase-list endpoint
    """
    url = URLS.challenge_phase_list.value
    url = "{}{}".format(get_host_url(), url)
    url = url.format(challenge_id)
    headers = get_request_header()
    try:
        response = requests.get(url, headers=headers)
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        # Known API error codes carry a JSON "error" payload worth showing.
        if response.status_code in EVALAI_ERROR_CODES:
            validate_token(response.json())
            echo(
                style(
                    "\nError: {}".format(response.json()["error"]),
                    fg="red",
                    bold=True,
                )
            )
            echo(
                style(
                    "\nUse `evalai challenges` to fetch the active challenges.",
                    fg="red",
                    bold=True,
                )
            )
            echo(
                style(
                    "\nUse `evalai challenge CHALLENGE phases` to fetch the active phases.\n",
                    fg="red",
                    bold=True,
                )
            )
        else:
            echo(err)
        sys.exit(1)
    except requests.exceptions.RequestException:
        echo(
            style(
                "\nCould not establish a connection to EvalAI."
                " Please check the Host URL.\n",
                bold=True,
                fg="red",
            )
        )
        sys.exit(1)
    response = response.json()
    challenge_phases = response["results"]
    pretty_print_all_challenge_phases(challenge_phases)
def pretty_print_challenge_phase_data(phase):
    """
    Function to print the details of a challenge phase.

    :param phase: phase dict from the EvalAI API
    """
    phase_title = "\n{}".format(style(phase["name"], bold=True, fg="green"))
    challenge_id = "Challenge ID: {}".format(
        style(str(phase["challenge"]), bold=True, fg="blue")
    )
    phase_id = "Phase ID: {}\n\n".format(
        style(str(phase["id"]), bold=True, fg="blue")
    )
    title = "{} {} {}".format(phase_title, challenge_id, phase_id)
    # Strip HTML markup from the description before display.
    cleaned_desc = BeautifulSoup(phase["description"], "lxml").text
    description = "{}\n".format(cleaned_desc)
    # Dates are ISO strings; keep only the date part before the "T".
    start_date = "Start Date : {}".format(
        style(phase["start_date"].split("T")[0], fg="green")
    )
    start_date = "\n{}\n".format(style(start_date, bold=True))
    end_date = "End Date : {}".format(
        style(phase["end_date"].split("T")[0], fg="red")
    )
    end_date = "\n{}\n".format(style(end_date, bold=True))
    max_submissions_per_day = style(
        "\nMaximum Submissions per day : {}\n".format(
            str(phase["max_submissions_per_day"])
        ),
        bold=True,
    )
    max_submissions = style(
        "\nMaximum Submissions : {}\n".format(str(phase["max_submissions"])),
        bold=True,
    )
    codename = style("\nCode Name : {}\n".format(phase["codename"]), bold=True)
    leaderboard_public = style(
        "\nLeaderboard Public : {}\n".format(phase["leaderboard_public"]),
        bold=True,
    )
    is_active = style("\nActive : {}\n".format(phase["is_active"]), bold=True)
    is_public = style("\nPublic : {}\n".format(phase["is_public"]), bold=True)
    challenge_phase = "{}{}{}{}{}{}{}{}{}{}".format(
        title,
        description,
        start_date,
        end_date,
        max_submissions_per_day,
        max_submissions,
        leaderboard_public,
        codename,
        is_active,
        is_public,
    )
    echo(challenge_phase)
def display_challenge_phase_detail(challenge_id, phase_id, is_json):
    """
    Function to print details of a challenge phase.

    :param challenge_id: challenge id for the detail endpoint
    :param phase_id: phase id for the detail endpoint
    :param is_json: when True, print the raw JSON instead of the pretty view
    """
    url = URLS.challenge_phase_detail.value
    url = "{}{}".format(get_host_url(), url)
    url = url.format(challenge_id, phase_id)
    headers = get_request_header()
    try:
        response = requests.get(url, headers=headers)
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        # Known API error codes carry a JSON "error" payload worth showing.
        if response.status_code in EVALAI_ERROR_CODES:
            validate_token(response.json())
            echo(
                style(
                    "\nError: {}\n"
                    "\nUse `evalai challenges` to fetch the active challenges.\n"
                    "\nUse `evalai challenge CHALLENGE phases` to fetch the "
                    "active phases.\n".format(response.json()["error"]),
                    fg="red",
                    bold=True,
                )
            )
        else:
            echo(err)
        sys.exit(1)
    except requests.exceptions.RequestException:
        echo(
            style(
                "\nCould not establish a connection to EvalAI."
                " Please check the Host URL.\n",
                bold=True,
                fg="red",
            )
        )
        sys.exit(1)
    response = response.json()
    phase = response
    if is_json:
        phase_json = json.dumps(phase, indent=4, sort_keys=True)
        echo(phase_json)
    else:
        pretty_print_challenge_phase_data(phase)
def pretty_print_challenge_phase_split_data(phase_splits):
    """Print the visible challenge phase splits as a table.

    :param phase_splits: list of phase-split dicts from the EvalAI API
    """
    table = BeautifulTable(max_width=100)
    table.column_headers = [
        "Challenge Phase ID",
        "Dataset Split",
        "Challenge Phase Name",
    ]
    for split in phase_splits:
        # Only splits with visibility == 3 are shown (presumably the
        # "public" level — confirm against the EvalAI API docs).
        if split["visibility"] != 3:
            continue
        table.append_row(
            [split["id"], split["dataset_split_name"], split["challenge_phase_name"]]
        )
    echo(table)
def display_challenge_phase_split_list(challenge_id):
    """
    Function to display Challenge Phase Splits of a particular challenge.

    :param challenge_id: challenge id for the phase-split endpoint
    """
    url = URLS.challenge_phase_split_detail.value
    url = "{}{}".format(get_host_url(), url)
    url = url.format(challenge_id)
    headers = get_request_header()
    try:
        response = requests.get(url, headers=headers)
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        # Known API error codes carry a JSON "error" payload worth showing.
        if response.status_code in EVALAI_ERROR_CODES:
            validate_token(response.json())
            echo(
                style(
                    "\nError: {}\n"
                    "\nUse `evalai challenges` to fetch the active challenges.\n"
                    "\nUse `evalai challenge CHALLENGE phases` to fetch the "
                    "active phases.\n".format(response.json()["error"]),
                    fg="red",
                    bold=True,
                )
            )
        else:
            echo(err)
        sys.exit(1)
    except requests.exceptions.RequestException:
        echo(
            style(
                "\nCould not establish a connection to EvalAI."
                " Please check the Host URL.\n",
                bold=True,
                fg="red",
            )
        )
        sys.exit(1)
    phase_splits = response.json()
    if len(phase_splits) != 0:
        pretty_print_challenge_phase_split_data(phase_splits)
    else:
        echo(style("Sorry, no Challenge Phase Splits found.", bold=True, fg="red"))
def pretty_print_leaderboard_data(attributes, results):
    """
    Pretty print the leaderboard for a particular challenge phase split.

    :param attributes: metric column labels from the leaderboard schema
    :param results: leaderboard entries, assumed ordered by rank
    """
    leaderboard_table = BeautifulTable(max_width=150)
    headers = ["Rank", "Participant Team"] + attributes + ["Last Submitted"]
    leaderboard_table.column_headers = [str(header) for header in headers]
    for rank, entry in enumerate(results, start=1):
        team = entry["submission__participant_team__team_name"]
        submitted = convert_UTC_date_to_local(entry["submission__submitted_at"])
        leaderboard_table.append_row([rank, team] + entry["result"] + [submitted])
    echo(leaderboard_table)
def display_leaderboard(challenge_id, phase_split_id):
    """
    Function to display the Leaderboard of a particular CPS.

    :param challenge_id: challenge id (not used in the URL; kept for the CLI)
    :param phase_split_id: challenge-phase-split id for the leaderboard endpoint
    """
    url = "{}{}".format(get_host_url(), URLS.leaderboard.value)
    url = url.format(phase_split_id)
    headers = get_request_header()
    try:
        response = requests.get(url, headers=headers)
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        # Known API error codes carry a JSON "error" payload worth showing.
        if response.status_code in EVALAI_ERROR_CODES:
            validate_token(response.json())
            echo(
                style(
                    "Error: {}".format(response.json()["error"]),
                    fg="red",
                    bold=True,
                )
            )
        else:
            echo(err)
        sys.exit(1)
    except requests.exceptions.RequestException:
        echo(
            style(
                "\nCould not establish a connection to EvalAI."
                " Please check the Host URL.\n",
                bold=True,
                fg="red",
            )
        )
        sys.exit(1)
    response = response.json()
    results = response["results"]
    if len(results) != 0:
        # Metric column labels come from the schema of the first entry.
        attributes = results[0]["leaderboard__schema"]["labels"]
        pretty_print_leaderboard_data(attributes, results)
    else:
        echo(style("Sorry, no Leaderboard results found.", bold=True, fg="red"))
| StarcoderdataPython |
1632663 | <filename>main.py<gh_stars>0
import binascii
import glob,os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from keras.models import Sequential, load_model
import time
import matplotlib.pyplot as plt
model_path = './models/weights-improvement-10-0.92.hdf5'
model = load_model(model_path)
# model.load_weights(model_weights_path)
img_width, img_height = 150, 150
def predict(file):
    """Classify the image at *file* as cat (0) or dog (1) with the loaded model.

    :param file: path to an image file
    :return: 0 for cat, 1 for dog
    """
    x = load_img(file, target_size=(img_width, img_height))
    x = img_to_array(x)
    x = x.reshape((1,) + x.shape)
    x /= 255
    array = model.predict(x)
    # Single sigmoid output in array[0, 0].
    result = array[0, 0]
    # BUG FIX: np.argmax on a scalar always returned 0, so the return value
    # never reflected the predicted class, and result == 0.5 printed nothing.
    # Derive the label from the sigmoid threshold instead (0.5 counts as dog).
    answer = int(result >= 0.5)
    if answer == 0:
        print(file + " Predicted to be a cat")
    else:
        print(file + " Predicted to be a dog")
    return answer
print("Enter path you want to scan:")
strDrive = input()
print()
print("File Name\t\t|\t\tMasqueraded?")
print("_____________________________________________________")
masqueraded = []
i = 0
# Magic numbers are compared in lowercase hex, matching hexlify's output.
for path in glob.glob(strDrive + "**/*", recursive=True):
    if os.path.isfile(path):
        name = os.path.basename(path)
        i += 1
        with open(path, 'rb') as f:
            content = f.read()
        var = binascii.hexlify(content).decode("utf-8")
        if name.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', )):
            # An image extension is honest about image content: not masqueraded.
            print(f"{name}\t\t|\t\tNo")
        elif var.startswith(("ffd8ffdb", "ffd8ffe000104a4649460001", "ffd8ffee")):
            print(f"{name}\t\t|\t\tYes (JPG)")
            masqueraded.append(path)
        elif var.startswith("ffd8ffe1") and var[12:24] == "457869660000":
            # BUG FIX: the old pattern "ffd8ffe1????457869660000" contained
            # literal '?' characters and could never match; the two bytes
            # after ffd8ffe1 are a variable-length field and must be skipped.
            print(f"{name}\t\t|\t\tYes (JPG)")
            masqueraded.append(path)
        elif var.startswith("89504e470d0a1a0a"):
            print(f"{name}\t\t|\t\tYes (PNG)")
            masqueraded.append(path)
        elif var.startswith("424d"):
            # BUG FIX: hexlify emits lowercase hex, so the old uppercase
            # "424D" comparison was dead code.
            print(f"{name}\t\t|\t\tYes (BMP)")
            masqueraded.append(path)
        elif var.startswith("474946383761"):
            print(f"{name}\t\t|\t\tYes (GIF)")
            masqueraded.append(path)
        else:
            print(f"{name}\t\t|\t\tNo")
print(f"{i} files scanned successfully.")
print()
print(f"{len(masqueraded)} masqueraded files scanned successfully.")
print("list of all masqueraded images:")
print()
for j in masqueraded:
    predict(j)
139663 | <reponame>aagnone3/python-skeleton
import sys
from argparse import ArgumentParser
from python_skeleton_project import greet_world
def get_clargs():
    """Parse and return the command-line arguments."""
    parser = ArgumentParser()
    parser.add_argument(
        "-d", "--descriptor", help="Descriptor of world to greet."
    )
    return parser.parse_args()
def main():
    """CLI entry point: greet the world and exit with its status code."""
    clargs = get_clargs()
    sys.exit(greet_world(clargs.descriptor))
| StarcoderdataPython |
1666149 | CUSTOM_GROUP = 'custom-group' | StarcoderdataPython |
from abc import abstractmethod
from contextlib import contextmanager
from typing import Iterator, List

import arcade
from arcade.gui.events import UIEvent
class InteractionMixin:
    """Test helper mixin that simulates mouse interaction and records UI events."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Chronological record of every UIEvent seen by _on_ui_event().
        self.event_history: List[UIEvent] = []

    def move_mouse(self, x: int, y: int):
        """Simulate a motion event to (x, y) with zero delta."""
        self.on_mouse_motion(x, y, 0, 0)

    def click_and_hold(self, x: int, y: int, button=arcade.MOUSE_BUTTON_LEFT):
        """Press *button* at (x, y) without releasing it."""
        self.on_mouse_press(x=x, y=y, button=button, modifiers=0)

    def release(self, x: int, y: int, button=arcade.MOUSE_BUTTON_LEFT):
        """Release *button* at (x, y)."""
        self.on_mouse_release(x=x, y=y, button=button, modifiers=0)

    def click(self, x: int, y: int):
        """Full left-button click: press followed by release."""
        self.click_and_hold(x, y)
        self.release(x, y)

    def right_click(self, x: int, y: int):
        """Full right-button click: press followed by release."""
        self.click_and_hold(x, y, button=arcade.MOUSE_BUTTON_RIGHT)
        self.release(x, y, button=arcade.MOUSE_BUTTON_RIGHT)

    def _on_ui_event(self, event: UIEvent):
        self.event_history.append(event)

    @property
    def last_event(self):
        """Most recently recorded event, or None if nothing was recorded."""
        if self.event_history:
            return self.event_history[-1]
        return None

    @abstractmethod
    def dispatch_ui_event(self, event):
        pass
@contextmanager
def record_ui_events(widget, *names) -> "Iterator[List[UIEvent]]":
    """Context manager recording the named events dispatched by *widget*.

    Handlers are pushed on entry and removed on exit; the yielded list is
    filled in place as events arrive.

    BUG FIX: the return annotation was ``List[UIEvent]``, but a
    ``@contextmanager`` generator yields the list — the correct annotation
    is ``Iterator[List[UIEvent]]`` (written as a forward reference).

    :param widget: object exposing ``push_handlers`` / ``remove_handlers``
    :param names: event handler names to record (e.g. ``"on_click"``)
    """
    events: List[UIEvent] = []

    def record(event):
        events.append(event)

    handlers = {name: record for name in names}
    widget.push_handlers(**handlers)
    yield events
    widget.remove_handlers(**handlers)
| StarcoderdataPython |
3292633 | <gh_stars>1-10
import sys
from time import sleep
import pygame
from settings import Settings
from game_stats import GameStats
from scoreboard import Scoreboard
from button import Button
from archer import Archer
from arrow import Arrow
from monster import Monster
class MonsterInvasion:
# Class for game assets/behavior.
    def __init__(self):
        # Init the game & create resources.
        pygame.init()
        self.settings = Settings()
        self.screen = pygame.display.set_mode(
            (self.settings.screen_width, self.settings.screen_height))
        pygame.display.set_caption("Monster Invasion")
        # Create instance of game stats & scoreboard.
        self.stats = GameStats(self)
        self.sb = Scoreboard(self)
        self.archer = Archer(self)
        # Sprite groups holding the live arrows and the monster horde.
        self.arrows = pygame.sprite.Group()
        self.monsters = pygame.sprite.Group()
        self._create_horde()
        # Create a play button.
        self.play_button = Button(self, "Play")
def _create_horde(self):
# Create a horde of monsters.
# Make a monster and find the number of monsters in a row.
# Spacing between each monster is equal to one monster width.
monster = Monster(self)
monster_width, monster_height = monster.rect.size
available_space_x = self.settings.screen_width - (2 * monster_width)
number_monsters_x = available_space_x // (2 * monster_width)
# Determine the number of rows of monsters that fit on the screen.
archer_height = self.archer.rect.height
available_space_y = (self.settings.screen_height - (3 * monster_height) - archer_height)
number_rows = available_space_y // (2 * monster_height)
# Create full horde of monsters.
for row_number in range(number_rows):
for monster_number in range(number_monsters_x):
self._create_monster(monster_number, row_number)
def _create_monster(self, monster_number, row_number):
# Create a monster and place it in a row.
monster = Monster(self)
monster_width, monster_height = monster.rect.size
monster.x = monster_width + 2 * monster_width * monster_number
monster.rect.x = monster.x
monster.rect.y = monster.rect.height + 2 * monster.rect.height * row_number
self.monsters.add(monster)
def _update_monsters(self):
# Check if horde is at edge, then update the positions of all monsters in the horde.
self._check_horde_edges()
self.monsters.update()
# Look for monster-archer collisions.
if pygame.sprite.spritecollideany(self.archer, self.monsters):
self._archer_hit()
# Look for monsters hitting the bottom of screen.
self._check_monsters_bottom()
def _check_horde_edges(self):
# Respond appropriately if monsters have reached an edge of screen.
for monster in self.monsters.sprites():
if monster.check_edges():
self._change_horde_direction()
break
def _check_monsters_bottom(self):
# Check if any monsters have reached the bottom of the screen.
screen_rect = self.screen.get_rect()
for monster in self.monsters.sprites():
if monster.rect.bottom >= screen_rect.bottom:
# Treat this the same as if archer got hit.
self._archer_hit()
break
def _change_horde_direction(self):
# Drop the entire horde and change horde direction.
for monster in self.monsters.sprites():
monster.rect.y += self.settings.horde_drop_speed
self.settings.horde_direction *= -1
def _check_events(self):
# Watch for keyboard/mouse events.
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_pos = pygame.mouse.get_pos()
self._check_play_button(mouse_pos)
elif event.type == pygame.KEYDOWN:
self._check_keydown_events(event)
elif event.type == pygame.KEYUP:
self._check_keyup_events(event)
def _check_play_button(self, mouse_pos):
# Start a new game when player clicks Play button.
button_clicked = self.play_button.rect.collidepoint(mouse_pos)
if button_clicked and not self.stats.game_active:
# Reset the game settings.
self.settings.initialize_dynamic_settings()
# Reset the game statistics.
self.stats.reset_stats()
self.stats.game_active = True
self.sb.prep_score()
self.sb.prep_level()
self.sb.prep_archers()
# Get rid of any remaining monsters & bullets.
self.monsters.empty()
self.arrows.empty()
# Create a new horde & center archer.
self._create_horde()
self.archer.center_archer()
# Hide mouse cursor
pygame.mouse.set_visible(False)
def _check_keydown_events(self, event):
# Respond to keypresses.
if event.key == pygame.K_RIGHT:
self.archer.moving_right = True
elif event.key == pygame.K_LEFT:
self.archer.moving_left = True
elif event.key == pygame.K_q:
sys.exit()
elif event.key == pygame.K_SPACE:
self._fire_arrow()
def _check_keyup_events(self, event):
# Response to key releases
if event.key == pygame.K_RIGHT:
self.archer.moving_right = False
elif event.key == pygame.K_LEFT:
self.archer.moving_left = False
def _fire_arrow(self):
# Create new arrow and add to arrow group.
if len(self.arrows) < self.settings.arrows_allowed:
new_arrow = Arrow(self)
self.arrows.add(new_arrow)
def _update_arrows(self):
# Update position of arrows & get rid of old arrows.
# Update arrow positions.
self.arrows.update()
# Get rid of arrows once they are off screen.
for arrow in self.arrows.copy():
if arrow.rect.bottom <= 0:
self.arrows.remove(arrow)
self._check_arrow_monster_collisions()
def _check_arrow_monster_collisions(self):
# Respond to arrow-monster collisions.
# Remove any arrows/monsters that have collided.
collisions = pygame.sprite.groupcollide(self.arrows, self.monsters, True, True)
if collisions:
for monsters in collisions.values():
self.stats.score += self.settings.monster_points
self.sb.prep_score()
self.sb.check_high_score()
if not self.monsters:
# Destroy existing arrows & create new horde.
self.arrows.empty()
self._create_horde()
self.settings.increase_speed()
# Increase level.
self.stats.level += 1
self.sb.prep_level()
def _archer_hit(self):
# Respond to the archer being hit by a monster.
if self.stats.lives_left > 0:
# Decrement lives_left & update scoreboard.
self.stats.lives_left -= 1
self.sb.prep_archers()
# Get rid of any remaining monsters/arrows.
self.monsters.empty()
self.arrows.empty()
# Create a new horde & center the archer.
self._create_horde()
self.archer.center_archer()
# Pause
sleep(0.5)
else:
self.stats.game_active = False
pygame.mouse.set_visible(True)
def _update_screen(self):
# Redraw the screen during each pass of the loop.
self.screen.fill(self.settings.bg_color)
self.archer.blitme()
for arrow in self.arrows.sprites():
arrow.draw_arrow()
self.monsters.draw(self.screen)
# Draw the score info.
self.sb.show_score()
# Draw the play button if game is inactive.
if not self.stats.game_active:
self.play_button.draw_button()
# Make most recently drawn screen visible.
pygame.display.flip()
def run_game(self):
# Start the main game loop.
while True:
self._check_events()
if self.stats.game_active:
self.archer.update()
self._update_arrows()
self._update_monsters()
self._update_screen()
if __name__ == '__main__':
    # Make a game instance & run the game (only when executed directly,
    # not when this module is imported).
    mi = MonsterInvasion()
    mi.run_game()
53054 | #!/usr/bin/python3
import json
import sys
from pprint import pprint
import requests
from config import database
import MySQLdb

# Fetch all "Citybike Wien" bicycle-rental stations from the Overpass API
# and upsert them into the local MySQL table `stationen`.
try:
    db = MySQLdb.connect(database["host"],
                         database["user"],
                         database["passwd"],
                         database["db"])
    cur = db.cursor()
    # Overpass QL query: bicycle_rental nodes of the "Citybike Wien" network
    # inside area 3600109166 (OSM relation id + the 3.6e9 area offset
    # convention), returned as JSON.
    payload = {
        "data": (
            '[out:json][timeout:25];'
            'area(3600109166)->.searchArea;'
            'node["amenity"="bicycle_rental"]["network"="Citybike Wien"](area.searchArea);'
            'out body;>;out skel qt;'
        )
    }
    print("Overpass Abfrage")  # German: "Overpass query"
    r = requests.get('https://overpass-api.de/api/interpreter', params=payload)
    data = r.json()
    print("erfolgreich")  # German: "successful"
    i = 0
    for station in data["elements"]:
        # Only nodes carry the station data; 'out skel qt' may emit others.
        if station["type"] == "node":
            tags = station["tags"]
            # REPLACE (MySQL upsert) keyed on `ref`: existing stations are
            # overwritten in place instead of duplicated.
            cur.execute("REPLACE INTO stationen (ref, lon, lat, name) VALUES (%s,%s,%s,%s)",
                        (tags["ref"], station["lon"], station["lat"], tags["name"]))
            i += 1
    db.commit()
    print("%s Stationen importiert" % i)  # German: "... stations imported"
    db.close()
except MySQLdb.Error as e:
    # NOTE(review): only database errors are handled; a failed HTTP request
    # or missing tag would propagate uncaught — confirm that is acceptable.
    print("Error %d: %s" % (e.args[0], e.args[1]))
    sys.exit(1)
| StarcoderdataPython |
1660523 | import os
# NOTE: this is a Python 2 script (print statements, no string escaping of
# the Windows backslash paths). It runs a Pentaho transformation, GPG-encrypts
# the resulting CSV files and uploads them over SFTP.
import pysftp
import sys
import subprocess
import glob

sys.path.append("C:\Users\jinkersont\.gnupg")
sys.path.append("C:\Users\jinkersont")
sys.path.append("C:\Program Files (x86)\gnupg\\bin")
print(sys.path)

# Default upload parameters; each can be overridden on the command line.
UPLOAD = {
    "SERVER": "192.168.80.33",
    "PORT": 22,
    "USERNAME": "sftpuser",
    "KEYFILE": "C:\Users\jinkersont\.ssh\id_dsa.pub",
    "FOLDER": "documents",
    "TRANSFORMATION": "stars.ktr",
    "GPGKEY": "<EMAIL>"
}
SUFFIX=".out"
# External tool locations (Pentaho Pan and GnuPG).
PAN = "C:\Pentaho\design-tools\data-integration\Pan.bat"
GPG = "C:\Program Files (x86)\gnupg\\bin\gpg.exe"
print GPG
if not os.path.exists(GPG) :
    print "Could not locate gpg!"
    exit(-1)

# Parse command-line overrides for the UPLOAD defaults.
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-t", "--transformation", dest="transformation",
                  help="Transformation File", metavar="FILE")
parser.add_option("-d", "--directory", dest="directory",
                  help="upload directory", metavar="DIRECTORY")
parser.add_option("-s", "--server", dest="server",
                  help="Upload Server", metavar="SERVER")
parser.add_option("-p", "--port", dest="port",
                  help="Upload Host Port", metavar="PORT", type="int")
parser.add_option("-g", "--gpgkey", dest="gpgkey",
                  help="The GPG Key", metavar="GPGKEY")
(options, args) = parser.parse_args()
# Fall back to the UPLOAD defaults for every option left unset.
if not options.server :
    options.server = UPLOAD['SERVER']
if not options.port :
    options.port = UPLOAD['PORT']
if not options.directory :
    options.directory = UPLOAD['FOLDER']
if not options.transformation :
    options.transformation = UPLOAD['TRANSFORMATION']
if not os.path.exists(options.transformation) :
    print "Transformation file not found"
    exit(-1)
if not options.gpgkey :
    options.gpgkey = UPLOAD['GPGKEY']

# Run the transformation (produces MyData*.csv in the working directory).
print "Running " + PAN + " -file " + options.transformation
return_code = subprocess.call(PAN + " -file " + options.transformation)

# Encrypt each produced CSV with the recipient's public key, then delete
# the plaintext original.
path = 'MyData*.csv'
files=glob.glob(path)
for file in files:
    if not os.path.exists(file) :
        print "Error: " + file + " do not exists"
        exit(-1)
    call_line = [GPG, "-r", options.gpgkey, "-e", file]
    print call_line
    return_code = subprocess.call(call_line)
    if return_code > 0 :
        print "Exiting: Encryption Failed - Status " + str(return_code)
        exit(-1)
    os.remove(file)

# SFTP the encrypted files, removing each local copy after upload.
print "SFTP the file"
cnopts = pysftp.CnOpts()
cnopts.hostkeys.load(UPLOAD['KEYFILE'])
srv = pysftp.Connection(options.server, port=options.port,
                        username=UPLOAD['USERNAME'], cnopts=cnopts )
srv.chdir(options.directory)
path = 'MyData*.gpg'
files=glob.glob(path)
for file in files:
    print "Uploading " + file
    srv.put(file)
    os.remove(file)
srv.close()
| StarcoderdataPython |
3269587 | from backpack.core.derivatives.conv_transpose3d import ConvTranspose3DDerivatives
from backpack.extensions.firstorder.sum_grad_squared.sgs_base import SGSBase
class SGSConvTranspose3d(SGSBase):
    """Sum-grad-squared (SGS) extension wired to transpose-3D-convolution
    derivatives, covering the layer's ``bias`` and ``weight`` parameters."""

    def __init__(self):
        params = ["bias", "weight"]
        super().__init__(derivatives=ConvTranspose3DDerivatives(), params=params)
| StarcoderdataPython |
3223837 | <filename>scripts/oecd/regional_demography/deaths/preprocess_csv.py<gh_stars>0
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import pandas as pd
def multi_index_to_single_index(df):
    """Flatten a column MultiIndex into plain string column names.

    For each column tuple, the element at position 1 is stringified and all
    elements are concatenated (e.g. ``('D_TT', 'M')`` -> ``'D_TTM'``).
    The row index is then turned back into regular columns via
    ``reset_index``.  Mutates ``df.columns`` in place and returns the reset
    frame.
    """
    df.columns = [
        ''.join(str(part) if pos == 1 else part
                for pos, part in enumerate(col))
        for col in df.columns
    ]
    return df.reset_index()
# Load the raw OECD regional mortality table (TL3 regions).
df = pd.read_csv('REGION_DEMOGR_death_tl3.csv')

# First remove geos with names that we don't have mappings to dcid for.
# NOTE(review): the file handle from open() is never closed — harmless in a
# short-lived script, but a `with` block would be cleaner.
name2dcid = dict(json.loads(open('../name2dcid.json').read()))
df = df[df['Region'].isin(name2dcid.keys())]

# Second, replace the names with dcids.
df.replace({'Region': name2dcid}, inplace=True)

# Wrap years in literal double quotes; the CSV is later written with
# QUOTE_NONE, so this is how the year ends up quoted in the output file.
df['Year'] = '"' + df['Year'].astype(str) + '"'

# Pivot to one row per (region, year) with one column per (VAR, SEX) pair,
# then flatten the resulting column MultiIndex.
temp = df[['REG_ID', 'Region', 'VAR', 'SEX', 'Year', 'Value']]
temp_multi_index = temp.pivot_table(values='Value',
                                    index=['REG_ID', 'Region', 'Year'],
                                    columns=['VAR', 'SEX'])
df_cleaned = multi_index_to_single_index(temp_multi_index)

# Map each flattened OECD variable code (VAR + SEX suffix) to the
# corresponding Data Commons StatisticalVariable name.
VAR_to_statsvars = {
    'D_TT': 'Count_MortalityEvent',
    'D_Y0_4T': 'Count_MortalityEvent_Upto4Years',
    'D_Y5_9T': 'Count_MortalityEvent_5To9Years',
    'D_Y10_14T': 'Count_MortalityEvent_10To14Years',
    'D_Y15_19T': 'Count_MortalityEvent_15To19Years',
    'D_Y20_24T': 'Count_MortalityEvent_20To24Years',
    'D_Y25_29T': 'Count_MortalityEvent_25To29Years',
    'D_Y30_34T': 'Count_MortalityEvent_30To34Years',
    'D_Y35_39T': 'Count_MortalityEvent_35To39Years',
    'D_Y40_44T': 'Count_MortalityEvent_40To44Years',
    'D_Y45_49T': 'Count_MortalityEvent_45To49Years',
    'D_Y50_54T': 'Count_MortalityEvent_50To54Years',
    'D_Y55_59T': 'Count_MortalityEvent_55To59Years',
    'D_Y60_64T': 'Count_MortalityEvent_60To64Years',
    'D_Y65_69T': 'Count_MortalityEvent_65To69Years',
    'D_Y70_74T': 'Count_MortalityEvent_70To74Years',
    'D_Y75_79T': 'Count_MortalityEvent_75To79Years',
    'D_Y80_MAXT': 'Count_MortalityEvent_80OrMoreYears',
    'D_Y0_14T': 'Count_MortalityEvent_Upto14Years',
    'D_Y15_64T': 'Count_MortalityEvent_15To64Years',
    'D_Y65_MAXT': 'Count_MortalityEvent_65OrMoreYears',
    'D_TM': 'Count_MortalityEvent_Male',
    'D_Y0_4M': 'Count_MortalityEvent_Upto4Years_Male',
    'D_Y5_9M': 'Count_MortalityEvent_5To9Years_Male',
    'D_Y10_14M': 'Count_MortalityEvent_10To14Years_Male',
    'D_Y15_19M': 'Count_MortalityEvent_15To19Years_Male',
    'D_Y20_24M': 'Count_MortalityEvent_20To24Years_Male',
    'D_Y25_29M': 'Count_MortalityEvent_25To29Years_Male',
    'D_Y30_34M': 'Count_MortalityEvent_30To34Years_Male',
    'D_Y35_39M': 'Count_MortalityEvent_35To39Years_Male',
    'D_Y40_44M': 'Count_MortalityEvent_40To44Years_Male',
    'D_Y45_49M': 'Count_MortalityEvent_45To49Years_Male',
    'D_Y50_54M': 'Count_MortalityEvent_50To54Years_Male',
    'D_Y55_59M': 'Count_MortalityEvent_55To59Years_Male',
    'D_Y60_64M': 'Count_MortalityEvent_60To64Years_Male',
    'D_Y65_69M': 'Count_MortalityEvent_65To69Years_Male',
    'D_Y70_74M': 'Count_MortalityEvent_70To74Years_Male',
    'D_Y75_79M': 'Count_MortalityEvent_75To79Years_Male',
    'D_Y80_MAXM': 'Count_MortalityEvent_80OrMoreYears_Male',
    'D_Y0_14M': 'Count_MortalityEvent_Upto14Years_Male',
    'D_Y15_64M': 'Count_MortalityEvent_15To64Years_Male',
    'D_Y65_MAXM': 'Count_MortalityEvent_65OrMoreYears_Male',
    'D_TF': 'Count_MortalityEvent_Female',
    'D_Y0_4F': 'Count_MortalityEvent_Upto4Years_Female',
    'D_Y5_9F': 'Count_MortalityEvent_5To9Years_Female',
    'D_Y10_14F': 'Count_MortalityEvent_10To14Years_Female',
    'D_Y15_19F': 'Count_MortalityEvent_15To19Years_Female',
    'D_Y20_24F': 'Count_MortalityEvent_20To24Years_Female',
    'D_Y25_29F': 'Count_MortalityEvent_25To29Years_Female',
    'D_Y30_34F': 'Count_MortalityEvent_30To34Years_Female',
    'D_Y35_39F': 'Count_MortalityEvent_35To39Years_Female',
    'D_Y40_44F': 'Count_MortalityEvent_40To44Years_Female',
    'D_Y45_49F': 'Count_MortalityEvent_45To49Years_Female',
    'D_Y50_54F': 'Count_MortalityEvent_50To54Years_Female',
    'D_Y55_59F': 'Count_MortalityEvent_55To59Years_Female',
    'D_Y60_64F': 'Count_MortalityEvent_60To64Years_Female',
    'D_Y65_69F': 'Count_MortalityEvent_65To69Years_Female',
    'D_Y70_74F': 'Count_MortalityEvent_70To74Years_Female',
    'D_Y75_79F': 'Count_MortalityEvent_75To79Years_Female',
    'D_Y80_MAXF': 'Count_MortalityEvent_80OrMoreYears_Female',
    'D_Y0_14F': 'Count_MortalityEvent_Upto14Years_Female',
    'D_Y15_64F': 'Count_MortalityEvent_15To64Years_Female',
    'D_Y65_MAXF': 'Count_MortalityEvent_65OrMoreYears_Female',
}
df_cleaned.rename(columns=VAR_to_statsvars, inplace=True)
# QUOTE_NONE because the Year values already carry literal quotes (above).
df_cleaned.to_csv('OECD_deaths_cleaned.csv',
                  index=False,
                  quoting=csv.QUOTE_NONE)

# Automate Template MCF generation since there are many Statistical Variables.
TEMPLATE_MCF_TEMPLATE = """
Node: E:OECD_deaths_cleaned->E{index}
typeOf: dcs:StatVarObservation
variableMeasured: dcs:{stat_var}
measurementMethod: dcs:OECDRegionalStatistics
observationAbout: C:OECD_deaths_cleaned->Region
observationDate: C:OECD_deaths_cleaned->Year
observationPeriod: "P1Y"
value: C:OECD_deaths_cleaned->{stat_var}
"""

# Skip the first three columns (REG_ID, Region, Year): everything after
# them is a StatisticalVariable column.
stat_vars = df_cleaned.columns[3:]
with open('OECD_deaths.tmcf', 'w', newline='') as f_out:
    for i in range(len(stat_vars)):
        f_out.write(
            TEMPLATE_MCF_TEMPLATE.format_map({
                'index': i + 1,
                'stat_var': stat_vars[i]
            }))
| StarcoderdataPython |
4821446 | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing OpenJDK installation and cleanup functions."""
from perfkitbenchmarker import flags
FLAGS = flags.FLAGS

# Conventional install prefix for the distro-packaged JDK.
# NOTE(review): JAVA_HOME is not referenced in this module — presumably
# consumed by benchmarks importing this package; confirm before removing.
JAVA_HOME = '/usr'

# Selects which OpenJDK major version the install functions below request.
flags.DEFINE_string('openjdk_version', '7', 'Version of openjdk to use. '
                    'You must use this flag to specify version 8 for '
                    'ubuntu 1604 and other operating systems where '
                    'openjdk7 is not installable by default')
def YumInstall(vm):
    """Installs the OpenJDK development package on a yum-based VM."""
    package = 'java-1.{0}.0-openjdk-devel'.format(FLAGS.openjdk_version)
    vm.InstallPackages(package)
def AptInstall(vm):
    """Installs the OpenJDK development package on an apt-based VM."""
    package = 'openjdk-{0}-jdk'.format(FLAGS.openjdk_version)
    vm.InstallPackages(package)
| StarcoderdataPython |
3387252 | <reponame>bjodah/chemreac
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Two coupled decays
------------------
:download:`examples/decay.py` demonstrates accuracy
by comparison with analytic solution for a simple system
of two coupled decays
::
$ python decay.py --help
.. exec::
echo "::\\n\\n"
python examples/examples/decay.py --help | sed "s/^/ /"
Here is an example generated by:
::
$ python decay.py --plot --savefig decay.png
.. image:: ../_generated/decay.png
Motivation for sigmoid damped exp(); vary tend: 5, 700, 1700.
Never mind 700 not being correctly represented, the problem
is 1700 completely ruining the integration (NaN's due to overflow).
::
$ python decay.py --plot --savefig decay_long.png --rates 1.0 --logy --logt \
--rtol 1e-13 --atol 1e-6 --scale-err 100.0 --plotlogy --nt 1024 --tend 1700
.. image:: ../_generated/decay_long.png
::
$ python decay.py --plot --savefig decay_long_damp.png --rates 1.0 --logy \
--logt --rtol 1e-13 --atol 1e-6 --scale-err 100.0 --plotlogy --nt 1024 \
--tend 1700 --sigm-damp
.. image:: ../_generated/decay_long_damp.png
"""
from __future__ import absolute_import, division, print_function
import argh
import numpy as np
from chemreac import ReactionDiffusion
from chemreac.integrate import run
from chemreac.util.analysis import solver_linear_error
from chemreac.util.plotting import save_and_or_show_plot
# Closed-form (Bateman) solutions for a linear decay chain A -> B -> C.
# Key i is the species index; each lambda maps (y0, k, t) -> concentration,
# where y0 holds initial amounts and k the (zero-padded) rate constants.
# Only valid for pairwise distinct rates (k_i != k_j) — see the module doc.
analytic = {
    0: lambda y0, k, t: (
        y0[0] * np.exp(-k[0]*t)),
    1: lambda y0, k, t: (
        y0[1] * np.exp(-k[1] * t) + y0[0] * k[0] / (k[1] - k[0]) *
        (np.exp(-k[0]*t) - np.exp(-k[1]*t))),
    2: lambda y0, k, t: (
        y0[2] * np.exp(-k[2] * t) + y0[1] * k[1] / (k[2] - k[1]) *
        (np.exp(-k[1]*t) - np.exp(-k[2]*t)) +
        k[1] * k[0] * y0[0] / (k[1] - k[0]) *
        (1 / (k[2] - k[0]) * (np.exp(-k[0]*t) - np.exp(-k[2]*t)) -
         1 / (k[2] - k[1]) * (np.exp(-k[1]*t) - np.exp(-k[2]*t))))
}
def get_Cref(k, y0, tout):
    """Stack the analytic (Bateman) reference concentrations column-wise.

    *k* is the list of rate constants (padded with zeros up to length 3),
    *y0* the initial amounts and *tout* the output times; at most three
    species are evaluated.
    """
    padded = list(k) + [0] * (3 - len(k))
    n_species = min(3, len(k) + 1)
    columns = [analytic[i](y0, padded, tout) for i in range(n_species)]
    return np.column_stack(columns)
def integrate_rd(tend=2.0, A0=1.0, nt=67, t0=0.0,
                 rates='3.40715,4.0', logy=False, logt=False,
                 plot=False, savefig='None', method='bdf',
                 atol='1e-7,1e-6,1e-5', rtol='1e-6', sigm_damp=False,
                 num_jac=False, scale_err=1.0, small='None', use_log2=False,
                 plotlogy=False, plotlogt=False, verbose=False):
    """
    Analytic solution through Bateman equation =>
    ensure :math:`|k_i - k_j| \\gg eps`

    Integrates a chain of up to three consecutive decays from *t0* to *tend*
    over *nt* time points and compares with the analytic reference
    (``get_Cref``).  *rates* and *atol* are comma-separated strings because
    this function doubles as an argh CLI entry point.  Returns
    ``(yout, Cref, rd, info)``.
    """
    # One species per rate constant plus the final product.
    k = list(map(float, rates.split(',')))
    n = len(k)+1
    if n > 4:
        raise ValueError("Max 3 consequtive decays supported at the moment.")

    atol = list(map(float, atol.split(',')))
    if len(atol) == 1:
        atol = atol[0]
    rtol = float(rtol)

    rd = ReactionDiffusion(
        n, [[i] for i in range(n-1)], [[i] for i in range(1, n)],
        k, logy=logy, logt=logt, use_log2=use_log2)
    # All initial material in the first species.
    y0 = np.zeros(n)
    y0[0] = A0
    if small == 'None':
        tiny = None
    else:
        # Shift all concentrations by `small` to keep log transforms finite.
        tiny = 0
        y0 += float(small)
    tout = np.linspace(t0, tend, nt)
    integr = run(rd, y0, tout, atol=atol, rtol=rtol, method=method,
                 with_jacobian=not num_jac, sigm_damp=sigm_damp, tiny=tiny)
    Cout, yout, info = integr.Cout, integr.yout, integr.info
    Cref = get_Cref(k, y0, tout - tout[0]).reshape((nt, 1, n))
    if verbose:
        print('rate: ', k)
        print(info)
    if plot:
        # Plot at most three species (A, B, C).
        nshow = min(n, 3)

        # NOTE(review): bare except — info['atol'] may be scalar or a
        # sequence; a TypeError check would be more precise.
        try:
            min_atol = min(info['atol'])
        except:
            min_atol = info['atol']

        import matplotlib.pyplot as plt
        plt.figure(figsize=(6, 10))
        c = 'rgb'
        for i, l in enumerate('ABC'[:nshow]):
            # Top panel: concentrations for all species.
            ax = plt.subplot(nshow+1, 1, 1)
            if plotlogy:
                ax.set_yscale('log')
            if plotlogt:
                ax.set_xscale('log')
            ax.plot(tout, Cout[:, 0, i], label=l, color=c[i])

            # Per-species panel: absolute error scaled by min(atol).
            ax = plt.subplot(nshow+1, 1, 2+i)
            if plotlogy:
                ax.set_yscale('symlog')  # abs error might be < 0
            if plotlogt:
                ax.set_xscale('log')
            ax.plot(tout, (Cout[:, 0, i]-Cref[:, 0, i])/min_atol,
                    label=l, color=c[i])
            # NOTE(review): bare excepts — same scalar-vs-sequence handling
            # as min_atol above.
            try:
                atol = info['atol'][i]
            except:
                atol = info['atol']
            try:
                rtol = info['rtol'][i]
            except:
                rtol = info['rtol']

            # Shade the solver's expected (scaled) error band.
            le_l, le_u = solver_linear_error(
                yout[:, 0, i], rtol, atol, rd.logy, scale_err=scale_err, expb=rd.expb)
            plt.fill_between(tout, (le_l - Cout[:, 0, i])/min_atol,
                             (le_u - Cout[:, 0, i])/min_atol,
                             color=c[i], alpha=0.2)

            # Print indices and values of violations of (scaled) error bounds
            def _print(violation):
                print(violation)
                print(le_l[violation],
                      Cref[violation, 0, i],
                      le_u[violation])

            l_viols = np.where(le_l > Cref[:, 0, i])[0]
            u_viols = np.where(le_u < Cref[:, 0, i])[0]
            if verbose and (len(l_viols) > 0 or len(u_viols) > 0):
                print("Outside error bounds for rtol, atol:", rtol, atol)
                # for violation in chain(l_viols, u_viols):
                #     _print(violation)

        plt.subplot(nshow+1, 1, 1)
        plt.title('Concentration vs. time')
        plt.legend(loc='best', prop={'size': 11})
        plt.xlabel('t')
        plt.ylabel('[X]')
        for i in range(nshow):
            plt.subplot(nshow+1, 1, 2+i)
            plt.title('Absolute error in [{}](t) / min(atol)'.format('ABC'[i]))
            plt.legend(loc='best')
            plt.xlabel('t')
            plt.ylabel('|E[{0}]| / {1:7.0g}'.format('ABC'[i], min_atol))
        plt.tight_layout()
        save_and_or_show_plot(savefig=savefig)

    return integr.yout, Cref, rd, info
if __name__ == '__main__':
    # argh exposes integrate_rd's keyword arguments as CLI flags.
    argh.dispatch_command(integrate_rd, output_file=None)
| StarcoderdataPython |
4832478 | <gh_stars>0
"""
Helper functions that can display leaflet maps inline in an ipython notebook
"""
import IPython.display as idisp
import html as hgen
def inline_map(map):
    """
    Embeds the HTML source of the map directly into the IPython notebook.

    This method will not work if the map depends on any files (json data). Also this uses
    the HTML5 srcdoc attribute, which may not be supported in all browsers.
    """
    map._build_map()
    escaped = map.HTML.replace('"', '&quot;')
    return idisp.HTML('<iframe srcdoc="{srcdoc}" style="width: 100%; height: 510px; border: none"></iframe>'.format(srcdoc=escaped))
def inline_maps(map_list):
    """
    Embeds the HTML source of the map_list directly into the IPython notebook.

    This method will not work if the map depends on any files (json data). Also this uses
    the HTML5 srcdoc attribute, which may not be supported in all browsers.

    map_list: 2-D array of maps. dimensions should be [nRows][nCols]. The method will throw a RuntimeError if not
    nRows: Number of rows
    nCols: Number of columns
    """
    nRows = len(map_list)
    # nCols = max([len(row) for row in map_list])
    # NOTE(review): hgen is `import html as hgen`; the stdlib `html` module
    # has no HTML() builder — this presumably relies on the third-party
    # HTML.py package shadowing it. Confirm the dependency.
    hb = hgen.HTML()
    # Lay the maps out as an HTML table, one <td>-embedded iframe per map.
    t = hb.table(width="100%")
    for r in range(nRows):
        row = t.tr
        for c in range(len(map_list[r])):
            currMap = map_list[r][c]
            currMap._build_map()
            row.td('<iframe srcdoc="{srcdoc}" style="width: 100%; height: 510px; border: none"></iframe>'.format(srcdoc=currMap.HTML.replace('"', '&quot;')))
    # Outer iframe grows with the number of rows (510 px per row).
    return idisp.HTML('<iframe srcdoc="{srcdoc}" style="width: 100%; height: {ht}px; border: none"></iframe>'.format(srcdoc=str(t).replace('"', '&quot;'), ht=510*nRows))
def embed_map(map, path="map.html"):
    """
    Embeds a linked iframe to the map into the IPython notebook.

    Note: this method will not capture the source of the map into the notebook.
    This method should work for all maps (as long as they use relative urls).
    """
    map.create_map(path=path)
    src = "files/{path}".format(path=path)
    return idisp.IFrame(src=src, width="100%", height="510")
| StarcoderdataPython |
131040 | <gh_stars>1-10
#!/usr/bin/env python
"""
Module test_interactive_prompt
"""
import os
import sys
sys.path.append(os.path.realpath('.'))
from creoconfig import Config
from creoconfig.exceptions import *
# NOTE: Python 2 module (print statements below).
def interactive_prompt():
    """Manual smoke test: declare several typed options, then prompt the
    user interactively for each and dump the resulting Config state."""
    c = Config()
    c.add_option('strkey',
                 prefix='Please enter string',
                 help='This is a string key')
    c.add_option('intkey',
                 prefix='Please enter integer value',
                 help='This is a int key',
                 type=int)
    c.add_option('choice_key',
                 prefix='Please enter one of the integer choices',
                 help='This is a int key which only allows certail values',
                 type=int,
                 choices=[1, 2, 3, 10])
    c.add_option('choice_key_str',
                 prefix='Please choose one of the string values',
                 help='This is a string key which only allows certail values',
                 type=str,
                 choices=['a', 'b', 'c', '10'])
    # Blocks for interactive user input on each declared option.
    c.prompt()
    # Attribute assignment stores ad-hoc values directly on the config.
    c.data = 'mydataval'
    c.another = 'moredata'
    c.another1 = 'abcs'
    print c
    print c._store.__dict__
    print c._available_keywords
    print c._isbatch
    # print "Missing: %s" % c.missingkey
    print("Configuration:")
    for k,v in c.iteritems():
        print("\t%s: %s" % (k, v))
if __name__ == '__main__':
    # Runs the interactive prompt only when executed directly (Python 2).
    print "INFO: Running interactive tests!"
    interactive_prompt()
| StarcoderdataPython |
4824889 | <reponame>danielshahaf/svnmailer-debian<filename>src/lib/svnmailer/notifier/mail.py
# -*- coding: utf-8 -*-
#
# Copyright 2004-2006 <NAME> or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Text based email notifiers (either piped to a program or via SMTP)
"""
__author__ = "<NAME>"
__docformat__ = "epytext en"
__all__ = ['getNotifier']
def getNotifier(config, groupset):
    """ Returns an initialized notifier or nothing

        @param config: The svnmailer config
        @type config: C{svnmailer.settings.Settings}

        @param groupset: The groupset to process
        @type groupset: C{list}

        @return: The list of notifiers (containing 0 or 1 member)
        @rtype: C{list}
    """
    from svnmailer import settings
    from svnmailer.notifier import _textmail, _multimail

    # Pick the submission backend: a configured sendmail command takes
    # precedence over an SMTP host.
    cls = None
    if config.general.sendmail_command:
        cls = SendmailSubmitter
    elif config.general.smtp_host:
        cls = SMTPSubmitter

    if cls:
        # First word of the first group's mail_type selects the mail format.
        mtype = (groupset.groups[0].mail_type or u'single').split()[0].lower()
        is_commit = (config.runtime.mode == settings.modes.commit)
        # Pre-ternary `and/or` idiom: multipart mail only for commit mode.
        mod = (is_commit and mtype == u'multipart') and \
            _multimail or _textmail
        return mod.getNotifier(cls, config, groupset)

    # No submission backend configured -> no notifiers.
    return []
class SMTPSubmitter(object):
    """ Use SMTP to submit the mail """
    # Injected by the notifier machinery before sendMail is called.
    _settings = None

    def sendMail(self, sender, to_addr, mail):
        """ Sends the mail via SMTP

            Serializes the message object into a string (Python 2 cStringIO)
            and submits it in a single SMTP session.
        """
        import smtplib, cStringIO

        fp = cStringIO.StringIO()
        mail.dump(fp)
        mail = fp.getvalue()
        fp.close()

        general = self._settings.general
        conn = smtplib.SMTP(general.smtp_host)
        if general.smtp_user:
            # NOTE(review): credentials are sent without STARTTLS — confirm
            # this is acceptable for the deployment environment.
            conn.login(general.smtp_user, general.smtp_pass)
        conn.sendmail(sender, to_addr, mail)
        conn.quit()
class SendmailSubmitter(object):
    """ Pipe all stuff to a mailer """
    # Injected by the notifier machinery before sendMail is called.
    _settings = None

    def sendMail(self, sender, to_addr, mail):
        """ Sends the mail via a piped mailer """
        from svnmailer import util

        pipe = util.getPipe2(self._getMailCommand(sender, to_addr))
        pipe.fromchild.close() # we don't expect something
        mail.dump(pipe.tochild)
        pipe.tochild.close()

        # what do we do with the return code?
        pipe.wait()

    def _getMailCommand(self, sender, to_addr):
        """ Returns the mailer command

            The command is created using sendmail conventions.
            If you want another commandline, override this method.

            @param sender: The sender address
            @type sender: C{str}

            @param to_addr: The receivers
            @type to_addr: C{list}

            @return: The command
            @rtype: C{list}
        """
        cmd = list(self._settings.general.sendmail_command)
        # Encode any unicode arguments (beyond the program name) as UTF-8
        # before handing them to the subprocess (Python 2 unicode handling).
        cmd[1:] = [(isinstance(arg, unicode) and
            [arg.encode("utf-8")] or [arg])[0] for arg in cmd[1:]
        ]
        # Sendmail convention: -f sets the envelope sender, recipients follow.
        cmd.extend(['-f', sender])
        cmd.extend(to_addr)

        return cmd
| StarcoderdataPython |
3301448 | <reponame>mbari-org/vars-gridview
# -*- coding: utf-8 -*-
"""
widgets.py -- A set of classes to extend widgets from pyqtgraph and pyqt for annotation purposes
Copyright 2020 Monterey Bay Aquarium Research Institute
Distributed under MIT license. See license.txt for more information.
"""
from typing import List
import cv2
import numpy as np
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
from config import settings
from libs.annotation import VARSLocalization
class RectWidget(QtWidgets.QGraphicsWidget):
rectHover = QtCore.Signal(object)
def __init__(self,
localizations: List[VARSLocalization],
image: np.ndarray, index: int,
parent=None, text_label='rect widget'):
QtWidgets.QGraphicsWidget.__init__(self, parent)
self.localizations = localizations # Dumb, but it works
self.image = image
self.index = index
self.labelheight = 30
self.bordersize = 4
self.picdims = [240, 240]
self.zoom = .5
self.text_label = text_label
self._boundingRect = QtCore.QRect()
self.setAcceptHoverEvents(True)
self.bgColor = QtCore.Qt.darkGray
self.hoverColor = QtCore.Qt.lightGray
self.isLastSelected = False
self.isSelected = False
self.forReview = False
self.toDiscard = False
self.roi = None
self.pic = None
self.update_roi_pic()
self.deleted = False
def update_roi_pic(self):
self.roi = self.localization.get_roi(self.image)
self.pic = self.getpic(self.roi)
self.update()
@property
def isAnnotated(self) -> bool:
return self.localizations[self.index].verified
@property
def localization(self):
return self.localizations[self.index]
@property
def image_width(self):
return self.image.shape[1]
@property
def image_height(self):
return self.image.shape[0]
def toqimage(self, img):
height, width, bytesPerComponent = img.shape
bytesPerLine = bytesPerComponent * width
cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
qimg = QtGui.QImage(img.copy(), width, height, bytesPerLine, QtGui.QImage.Format_RGB888)
return qimg
def update_zoom(self, zoom):
self.zoom = zoom
self.boundingRect()
self.updateGeometry()
def getFullImage(self):
return np.rot90(self.image, 3, (0, 1))
def boundingRect(self):
# scale and zoom
width = self.zoom * (self.picdims[0] + self.bordersize * 2)
height = self.zoom * (self.picdims[1] + self.labelheight + self.bordersize * 2)
thumb_widget_rect = QtCore.QRectF(0.0, 0.0, width, height)
self._boundingRect = thumb_widget_rect
return thumb_widget_rect
def sizeHint(self, which, constraint=QtCore.QSizeF()):
return self._boundingRect.size()
def getpic(self, roi):
height, width, channels = roi.shape
if height >= width:
scale = self.picdims[0] / height
else:
scale = self.picdims[0] / width
new_width = int(width * scale) - 2 * self.bordersize
new_height = int(height * scale) - 2 * self.bordersize
roi = cv2.resize(roi, (new_width, new_height))
# center roi on dims
w_pad = int((self.picdims[0] - new_width) / 2)
h_pad = int((self.picdims[1] - new_height) / 2)
roi = cv2.copyMakeBorder(
roi,
h_pad,
h_pad,
w_pad,
w_pad,
cv2.BORDER_CONSTANT,
value=settings.BG_COLOR
)
qimg = self.toqimage(roi)
orpixmap = QtGui.QPixmap.fromImage(qimg)
return orpixmap
def paint(self, painter, option, widget):
pen = QtGui.QPen()
pen.setWidth(1)
pen.setBrush(QtCore.Qt.black)
painter.setPen(pen)
# very simple selection and annotation logic
if self.isSelected:
fill_color = QtCore.Qt.green
elif self.isAnnotated:
fill_color = QtCore.Qt.yellow
else:
fill_color = QtCore.Qt.darkGray
# fill behind image
if self.isLastSelected:
painter.fillRect(QtCore.QRect(0,
0,
self.zoom * (self.pic.rect().width() + 2 * self.bordersize),
self.zoom * (
self.pic.rect().height() + self.labelheight + 2 * self.bordersize)),
QtGui.QColor(61, 174, 233, 255))
# Fill label
painter.fillRect(QtCore.QRect(self.zoom * self.bordersize,
self.zoom * (self.bordersize + self.pic.rect().height()),
self.zoom * self.pic.rect().width(),
self.zoom * self.labelheight),
fill_color)
# Draw image
painter.drawPixmap(QtCore.QRect(self.zoom * self.bordersize,
self.zoom * self.bordersize,
self.zoom * self.pic.rect().width(),
self.zoom * self.pic.rect().height()),
self.pic,
self.pic.rect())
# Draw text
text_rect = QtCore.QRect(0,
self.zoom * (self.pic.rect().y() + self.pic.rect().height()),
self.zoom * self.pic.rect().width(),
self.zoom * self.labelheight)
painter.drawText(text_rect, QtCore.Qt.AlignCenter, self.text_label)
if self.toDiscard:
painter.fillRect(QtCore.QRect(self.zoom * self.bordersize,
self.zoom * (self.bordersize),
self.zoom * self.pic.rect().width(),
self.zoom * self.labelheight),
QtCore.Qt.gray)
text_rect = QtCore.QRect(0,
self.zoom * (self.pic.rect().y()),
self.zoom * self.pic.rect().width(),
self.zoom * self.labelheight)
painter.setPen(QtCore.Qt.red)
painter.drawText(text_rect, QtCore.Qt.AlignCenter, "To Remove")
if self.forReview:
painter.fillRect(QtCore.QRect(self.zoom * self.bordersize,
self.zoom * (self.bordersize),
self.zoom * self.pic.rect().width(),
self.zoom * self.labelheight),
QtCore.Qt.gray)
text_rect = QtCore.QRect(0,
self.zoom * (self.pic.rect().y()),
self.zoom * self.pic.rect().width(),
self.zoom * self.labelheight)
painter.setPen(QtCore.Qt.blue)
painter.drawText(text_rect, QtCore.Qt.AlignCenter, "For Review")
def mousePressEvent(self, event):
    """Toggle this thumbnail's selection on click, repaint, and emit
    rectHover so the containing view can track the interaction."""
    self.isSelected = not self.isSelected
    self.update()
    self.rectHover.emit(self)
def mouseReleaseEvent(self, event):
    """No-op: selection is handled entirely on press (see mousePressEvent)."""
    pass
def hoverEnterEvent(self, event):
    """While Ctrl is held, hovering toggles selection — lets the user
    'paint' a selection across many thumbnails without clicking each."""
    modifiers = QtWidgets.QApplication.keyboardModifiers()
    if modifiers == QtCore.Qt.ControlModifier:
        self.isSelected = not self.isSelected
        self.update()
        self.rectHover.emit(self)
| StarcoderdataPython |
3394441 | <gh_stars>1-10
def find_column(input, lexpos):
    """Return the 1-based column number of position *lexpos* in *input*."""
    # rfind yields -1 when lexpos is on the first line, which makes the
    # subtraction below come out to lexpos + 1 — exactly a 1-based column.
    newline_at = input.rfind('\n', 0, lexpos)
    return lexpos - newline_at
| StarcoderdataPython |
3224186 | import json
import logging
from datetime import datetime
from pathlib import Path
from typing import Union
from game import Position
from game.client.controller.menu import Menu
from game.client.model.action import Action, ActionType, MoveAction, InventoryAction, ItemAction
from game.client.model.model import Model
from game.client.view.user_command import UserCommand
from game.client.view.view import View
class Controller:
    """Client-side game controller.

    Owns the model/view pair, runs the menu and main game loops, and
    translates user commands coming from the view into Actions that are
    sent to the server over the Network object returned by the menu.
    """
    # Polling rate of the input loop in start_game.
    FRAMES_PER_SECOND = 20
    GAME_CONFIG_PATH = Path('resources', 'config', 'game_config.json')
    ENTITIES_CONFIG_PATH = Path('resources', 'config', 'entities.json')
    LOG_DIR_PATH = Path('resources', 'logs')

    def __init__(self, *args, **kwargs):
        # Load the static configuration shipped with the client.
        with self.GAME_CONFIG_PATH.open('r') as src:
            self.game_config = json.load(src)
        with self.ENTITIES_CONFIG_PATH.open('r') as src:
            self.entities_desc = json.load(src)
        self.model = Model()
        self.menu = None
        self.view = View(self, self.model, self.entities_desc)
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logging.DEBUG)
        self._create_log_handler()

    def _create_log_handler(self):
        # One log file per client run, named after the start timestamp.
        if not Controller.LOG_DIR_PATH.exists():
            Controller.LOG_DIR_PATH.mkdir()
        current_date = datetime.now().strftime('%Y.%m.%d %H.%M.%S')
        log_name = 'client {}.txt'.format(current_date)
        log_file = Controller.LOG_DIR_PATH / log_name
        file_handler = logging.FileHandler(str(log_file))
        file_handler.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')
        file_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)

    def start_game(self):
        """Start the game on the client side.

        Processes all user actions and interacts with the server.
        Outer loop: one iteration per menu/game session; breaks when the
        menu returns no network (exit) or the player quits the game.
        """
        self.view.create()
        error = None
        self.logger.info('Game started')
        while True:
            self.logger.info('On new game stage')
            self.view.initialize()
            # Pass the previous session's error (if any) so the menu can show it.
            self.menu = Menu(self.view, error)
            error = None
            try:
                self.logger.info('On make_choice stage')
                network = self.menu.make_choice()
                if network is None:
                    self.logger.info('No network received, possible exit button was clicked')
                    break
                self.logger.info(f'Network was successfully created, singleplayer mode: {network.singleplayer}')
                if not network.singleplayer:
                    self.view.set_game_id(network.game_id)
                else:
                    self.view.set_game_id(None)
                self.logger.info('Starting game loop')
                while True:
                    self.logger.info('Receiving game state...')
                    state = network.get_state()
                    self.logger.info('Success')
                    self.model.update(state)
                    self.view.refresh_game()
                    if self.model.hero.stats.health == 0:
                        # Hero is dead: only a QUIT command is honored.
                        quit = False
                        while self.view.has_user_commands():
                            cmd = self.view.get_user_command()
                            if cmd == UserCommand.QUIT:
                                quit = True
                        if quit:
                            break
                    else:
                        # Drop commands queued while it was not our turn.
                        self.view.clear_user_command_queue()
                    if state.my_turn:
                        action = self._get_user_action()
                        if action is None:
                            continue
                        network.send_action(action)
                        if action.type == ActionType.QUIT_ACTION:
                            break
                    self.view.delay(1.0 / self.FRAMES_PER_SECOND)
                self.logger.info('Game successfully finished')
            except Exception as e:
                # Any network/protocol failure ends the session; the error is
                # surfaced on the next menu screen instead of crashing.
                error = 'Disconnected from server'
                self.logger.error('Disconnected from server')
                self.logger.exception(e)
            finally:
                self.menu.destroy()
        self.view.destroy()

    def _get_user_action(self) -> Union[Action, None]:
        """Poll the view for a command and convert it to an Action.

        Returns None when the pending command is unknown, so the caller's
        loop can poll again on the next frame.
        """
        while True:
            cmd = self.view.get_user_command()
            if cmd is UserCommand.UNKNOWN:
                return None
            if cmd in [UserCommand.UP, UserCommand.DOWN, UserCommand.LEFT, UserCommand.RIGHT, UserCommand.SKIP]:
                action = self._process_move(cmd)
                if action is not None:
                    return action
                continue
            if cmd == UserCommand.INVENTORY:
                action = self._process_inventory()
                if action is not None:
                    return action
                continue
            if cmd == UserCommand.QUIT:
                action = Action(type=ActionType.QUIT_ACTION, desc=None)
                return action
            # TODO add processing of other available commands

    def _process_move(self, cmd: UserCommand) -> Union[Action, None]:
        """Translate a movement command into a MoveAction.

        Returns None when the target cell is a wall (move rejected
        client-side, nothing is sent to the server).
        """
        # (row delta, column delta) per movement command; SKIP stays in place.
        dr, dc = {UserCommand.UP: (-1, 0),
                  UserCommand.DOWN: (+1, 0),
                  UserCommand.LEFT: ( 0, -1),
                  UserCommand.RIGHT: ( 0, +1),
                  UserCommand.SKIP: ( 0, 0)}[cmd]
        hero_position = self.model.hero.position
        new_position = Position.as_position(hero_position.row + dr, hero_position.col + dc)
        if self.model.labyrinth.is_wall(new_position):
            return None
        return Action(type=ActionType.MOVE_ACTION,
                      desc=MoveAction(row=new_position.row, column=new_position.col))

    def _process_inventory(self) -> Union[Action, None]:
        """Run the modal inventory loop.

        UP/DOWN navigate, USE/DROP produce an InventoryAction, a second
        INVENTORY command closes the screen without an action.
        Returns the chosen Action or None when the screen was closed.
        """
        inventory = self.model.inventory
        inventory.open()
        self.view.refresh_game()
        action = None
        while True:
            cmd = self.view.get_user_command()
            if cmd == UserCommand.INVENTORY:
                break
            if cmd == UserCommand.DOWN:
                inventory.select_next_item()
                self.view.refresh_game()
                continue
            if cmd == UserCommand.UP:
                inventory.select_previous_item()
                self.view.refresh_game()
                continue
            if inventory.no_item_selected():
                continue
            if cmd == UserCommand.USE:
                item_id = inventory.get_selected_item_position()
                action = Action(type=ActionType.INVENTORY_ACTION,
                                desc=InventoryAction(item_id=item_id, action=ItemAction.USE))
                break
            if cmd == UserCommand.DROP:
                item_id = inventory.get_selected_item_position()
                action = Action(type=ActionType.INVENTORY_ACTION,
                                desc=InventoryAction(item_id=item_id, action=ItemAction.DROP))
                break
        inventory.close()
        self.view.refresh_game()
        return action
| StarcoderdataPython |
177482 | <reponame>OnroerendErfgoed/static_map_generator
def merge_dicts(*dict_args):
    """
    Merge any number of dicts into a fresh, shallow-copied dict.
    When the same key occurs in several arguments, the value from the
    right-most (latter) dict wins.
    """
    # Comprehension iterates the dicts left to right, so later
    # assignments overwrite earlier ones — same precedence as update().
    return {key: value
            for mapping in dict_args
            for key, value in mapping.items()}
def rescale_bbox(height, width, bbox):
    """
    Grow a bounding box so its aspect ratio matches the image's.

    In case of metric coordinates: symmetrically pads either the x or
    the y extent (min and max alike) until the bbox's height/width ratio
    equals the image's height/width ratio.

    :param height: height of the image
    :param width: width of the image
    :param bbox: bbox of the map as [min_x, min_y, max_x, max_y]
    :return: the rescaled bbox (unchanged if ratios already match)
    """
    min_x, min_y, max_x, max_y = bbox
    image_ratio = float(height) / float(width)
    bbox_ratio = float(max_y - min_y) / float(max_x - min_x)
    if image_ratio < bbox_ratio:
        # bbox is proportionally too tall: widen it on both sides
        pad_x = (((max_y - min_y) / image_ratio) - max_x + min_x) / 2
        return [min_x - pad_x, min_y, max_x + pad_x, max_y]
    if image_ratio > bbox_ratio:
        # bbox is proportionally too wide: heighten it on both sides
        pad_y = ((image_ratio * (max_x - min_x)) - max_y + min_y) / 2
        return [min_x, min_y - pad_y, max_x, max_y + pad_y]
    return bbox
def calculate_scale(map_scale, map_width):
    """
    Calculate the scale-bar width in pixels together with its label,
    using the map scale (meters per pixel) and the map width (pixels).

    Returns a (scale_width, scale_label) tuple, e.g. (200, "200 m").
    """
    # Real-world width covered by the whole image, in meters.
    image_width_meter = round(map_scale * float(map_width))
    # Aim for a bar spanning roughly 20% of the image width.
    scale_num_guess = str(int(round(image_width_meter * 0.2)))
    # Snap the leading digit to an even value (2*round(digit/2)) while
    # keeping the order of magnitude, yielding a "nice" round number.
    scale_num = int(2 * round(float(int(scale_num_guess[0])) / 2)) * 10 ** (
        len(scale_num_guess[1:]))
    # A leading digit of 1 snaps down to 0; fall back to 1 * 10^k then.
    scale_num = scale_num if scale_num else 1 * 10 ** (len(scale_num_guess[1:]))
    # Convert the chosen real-world length back into pixels.
    scale_width = round(scale_num / map_scale)
    scale_label = f"{scale_num} m" if scale_num < 1000 else "{} km".format(
        scale_num / 1000)
    return scale_width, scale_label
| StarcoderdataPython |
1781960 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# BSD 3-Clause License
# Copyright (c) 2021, Tokyo Robotics Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the Tokyo Robotics Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy
import os
from pytoroboeye import toroboeye
from toroboeye_msgs.srv import *
ABSPATH = os.path.abspath(__file__)
NODE_NAME = os.path.splitext(os.path.basename(ABSPATH))[0]
def wait_for_service():
    """Block until every ToroboEye ROS service used by this client is advertised."""
    rospy.loginfo('Node starts: ' + NODE_NAME)
    rospy.loginfo('waiting services')
    rospy.wait_for_service('connect')
    rospy.wait_for_service('disconnect')
    rospy.wait_for_service('get_capture_setting')
    rospy.wait_for_service('set_capture_setting')
    rospy.wait_for_service('write')
    rospy.wait_for_service('activate')
    rospy.wait_for_service('deactivate')
    rospy.wait_for_service('capture')
    rospy.wait_for_service('wait_for_state')
    rospy.wait_for_service('wait_for_active')
    rospy.wait_for_service('wait_for_inactive')
    rospy.wait_for_service('wait_for_frame')
    rospy.wait_for_service('stop')
    rospy.wait_for_service('get_intrinsics')
    rospy.wait_for_service('update_frame')
    rospy.wait_for_service('update_intrinsics')
    rospy.loginfo('finish waiting services')
def connect(ip):
    """Connect to the ToroboEye controller at the given IP address.

    Re-raises the rospy.ServiceException if the service call fails, and
    raises Exception(response.message) when the controller reports failure.
    NOTE(review): this file uses Python 2 `except X, e` syntax throughout.
    """
    try:
        service = rospy.ServiceProxy('connect', Connect)
        rospy.loginfo(' Connecting to ToroboEye controller . . . ')
        response = service(ip, True)
        if response.success:
            rospy.loginfo('Connection Successful.')
        else:
            rospy.logerr(response.message)
    except rospy.ServiceException, e:
        # print "service call failed: %s" % e
        rospy.logerr(" Connection Failed. Plese Check LAN Cable Connection on controller.")
        raise
    else:
        # Service call succeeded but controller reported failure.
        if not response.success:
            raise Exception(response.message)
def disconnect():
try:
service = rospy.ServiceProxy('disconnect',Disconnect)
rospy.loginfo("Disconnecting...")
response = service()
if response.success:
rospy.loginfo("Successful Disconnecting.")
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
rospy.logerr("Failed to Disconnect.")
raise
else:
if not response.success:
raise Exception(response.message)
def get_capture_setting():
try:
service = rospy.ServiceProxy('get_capture_setting',GetCaptureSetting)
rospy.loginfo('Loading Current Setting . . .')
response = service()
if response.success:
rospy.loginfo('Loading to Completed.' )
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
rospy.logerr("Failed to Loading Setting. Plese Check Controller Status")
raise
else:
if not response.success:
raise Exception(response.message)
return response
def get_status(id):
try:
service = rospy.ServiceProxy('get_status', GetStatus)
rospy.loginfo("Checking Current Status...")
response = service(id)
if response.success:
rospy.loginfo('Confirmed Status.')
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
rospy.logerr("Failed to Confirm Status.")
raise
else:
if not response.success:
raise Exception(response.message)
return response.state
def set_capture_setting(
device_illuminant_power = 8,
depth_illuminant_color = toroboeye.Setting.DEPTH.ILLUMINANT_COLOR.RED,
depth_coding_pattern = toroboeye.Setting.DEPTH.CODING_PATTERN.GRAYCODE_BASE,
depth_accuracy = 2,
depth_exposure_time = 1,
color_strobe_intensity = 4,
color_exposure_time = 1
):
try:
service = rospy.ServiceProxy('set_capture_setting',SetCaptureSetting)
rospy.loginfo("Loading Current Capturing Setting. . .")
response = service(
device_illuminant_power,
depth_illuminant_color ,
depth_coding_pattern ,
depth_accuracy ,
depth_exposure_time ,
color_strobe_intensity ,
color_exposure_time ,
""
)
if response.success:
rospy.loginfo("Successful Loading Capturing Setting.")
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
print "service call failed: %s" % e
rospy.logerr("Failed to Load Current Capturing Setting.")
raise
else:
if not response.success:
raise Exception(response.message)
def write():
try:
service = rospy.ServiceProxy('write',Write)
print('[write pattern sets data into torobo eye device]')
response = service()
if response.success:
rospy.loginfo('success')
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
print "service call failed: %s" % e
raise
else:
if not response.success:
raise Exception(response.message)
def activate():
try:
service = rospy.ServiceProxy('activate', Activate)
rospy.loginfo("Selected Activation.")
response = service()
if response.success:
rospy.loginfo('Foward to Command...')
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
rospy.loginfo("Failed to Activate.")
raise
else:
if not response.success:
raise Exception(response.message)
def deactivate():
try:
service = rospy.ServiceProxy('deactivate',Deactivate)
rospy.loginfo("Selected Deactivation")
response = service()
if response.success:
rospy.loginfo("Foward Command. . .")
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
rospy.logerr("Failed to Deactivate")
raise
else:
if not response.success:
raise Exception(response.message)
def capture(oneshot = True):
try:
service = rospy.ServiceProxy('capture',Capture)
rospy.loginfo('Capturing by Oneshot . . .')
response = service(oneshot)
if response.success:
rospy.loginfo('Successful Oneshot Capturing')
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
rospy.logerr("Failed to Capture. Please Check Activation")
print "service call failed: %s" % e
raise
else:
if not response.success:
raise Exception(response.message)
def wait_for_state(activation, processing, timeout=None):
try:
service = rospy.ServiceProxy('wait_for_state',WaitForState)
print('[block thread until state of torobo eye device get to specified state]')
response = service(activation, processing, timeout)
if response.success:
rospy.loginfo('success')
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
print "service call failed: %s" % e
raise
else:
if not response.success:
raise Exception(response.message)
def wait_for_active(timeout=None):
try:
service = rospy.ServiceProxy('wait_for_active',WaitForActive)
rospy.loginfo("Activating. . .")
response = service(timeout)
if response.success:
rospy.loginfo('Successful Activation.')
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
rospy.logerr("Failed to Activate.")
raise
else:
if not response.success:
raise Exception(response.message)
def wait_for_inactive(timeout=None):
try:
service = rospy.ServiceProxy('wait_for_inactive',WaitForInactive)
rospy.loginfo('Deactivating. . .')
response = service(timeout)
if response.success:
rospy.loginfo("Successful Deactication.")
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
rospy.logerr("Failed to Deativate.")
raise
else:
if not response.success:
raise Exception(response.message)
def wait_for_frame(timeout = 5.0):
try:
service = rospy.ServiceProxy('wait_for_frame',WaitForFrame)
rospy.loginfo("Getting Captured Flame. . .")
response = service(timeout)
if response.success:
rospy.loginfo("Successfully Getting Captured Frame.")
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
rospy.logerr("Failed to get Captured Flame.")
raise
else:
if not response.success:
raise Exception(response.message)
return response
def update_frame(timeout = 5.0):
try:
service = rospy.ServiceProxy('update_frame',WaitForFrame)
rospy.loginfo("Getting Captured Flame. . .")
response = service(timeout)
if response.success:
rospy.loginfo("Successfully Getting Captured Frame.")
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
rospy.logerr("Failed to get Captured Flame.")
raise
else:
if not response.success:
raise Exception(response.message)
return response
def stop():
try:
service = rospy.ServiceProxy('stop',Stop)
rospy.loginfo("Stopping Capturing...")
response = service()
if response.success:
rospy.loginfo('Successful Stoping Capturing')
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
rospy.logerr("Failed to Stop Capturing.")
raise
else:
if not response.success:
raise Exception(response.message)
def get_intrinsics():
try:
service = rospy.ServiceProxy('get_intrinsics', GetIntrinsics)
rospy.loginfo('Loading Current Camera Parameter . . .')
response = service()
if response.success:
rospy.loginfo('Loading Completed.')
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
rospy.logerr("Failed to Loading Camera Parameter. Plese Check Contrtoller Status.")
raise
else:
if not response.success:
raise Exception(response.message)
return response
def update_intrinsics():
try:
service = rospy.ServiceProxy('update_intrinsics', GetIntrinsics)
rospy.loginfo('Update Camera Parameter . . .')
response = service()
if response.success:
rospy.loginfo('Loading Completed.')
else:
rospy.logerr(response.message)
except rospy.ServiceException, e:
rospy.logerr("Failed to Update Camera Parameter. Plese Check Contrtoller Status.")
raise
else:
if not response.success:
raise Exception(response.message)
return response | StarcoderdataPython |
1766506 | from __future__ import absolute_import
from sentry.testutils import TestCase
class StaticMediaTest(TestCase):
    """Tests for serving static media under /_static/.

    Covers cache and CORS headers for unversioned, version-prefixed and
    non-CORS (favicon) asset URLs.
    """

    def test_basic(self):
        # Unversioned asset: served, not cacheable, CORS-enabled.
        url = '/_static/sentry/app/index.js'
        response = self.client.get(url)
        assert response.status_code == 200, response
        assert 'Cache-Control' not in response
        assert 'Vary' not in response
        assert response['Access-Control-Allow-Origin'] == '*'

    def test_versioned(self):
        # Version-prefixed asset (numeric or md5-style prefix): cacheable.
        url = '/_static/1234567890/sentry/app/index.js'
        response = self.client.get(url)
        assert response.status_code == 200, response
        assert 'Cache-Control' in response
        assert 'Vary' not in response
        assert response['Access-Control-Allow-Origin'] == '*'
        url = '/_static/a43db3b08ddd4918972f80739f15344b/sentry/app/index.js'
        response = self.client.get(url)
        assert response.status_code == 200, response
        assert 'Cache-Control' in response
        assert 'Vary' not in response
        assert response['Access-Control-Allow-Origin'] == '*'

    def test_no_cors(self):
        # Favicon: served and uncached, without any CORS header.
        url = '/_static/sentry/images/favicon.ico'
        response = self.client.get(url)
        assert response.status_code == 200, response
        assert 'Cache-Control' not in response
        assert 'Vary' not in response
        assert 'Access-Control-Allow-Origin' not in response
| StarcoderdataPython |
21009 | <reponame>PaulWichser/adventofcode
import fileimp
# divide rows 0-127
# F = lower half
# B = upper half
# divide columns 0-7
# R = upper half
# L = lower half
# seat ID = row * 8 + col
# list of IDs
# max list
def idcalc(list):
    """Convert boarding-pass strings to seat IDs (AoC 2020 day 5).

    Each pass is 7 row characters (F = lower half = 0, B = upper half = 1)
    followed by 3 column characters (L = 0, R = 1); both groups are binary
    numbers read most-significant-bit first.  Seat ID = row * 8 + col.

    Args:
        list: iterable of boarding-pass strings, e.g. "FBFBBFFRLR".

    Returns:
        List of integer seat IDs, in input order.

    Raises:
        SystemExit: on an unexpected character (original fail-fast behavior).
    """
    # Fix: the original left per-pass debug print() calls and commented-out
    # reversal code in place; both removed.  The letters map directly to bits.
    bits = {'F': '0', 'B': '1', 'L': '0', 'R': '1'}
    seats = []
    for boarding_pass in list:
        row = ''
        col = ''
        for ch in boarding_pass:
            if ch in 'FB':
                row += bits[ch]
            elif ch in 'LR':
                col += bits[ch]
            else:
                print("something went wrong in rows & cols")
                quit()
        seats.append(int(row, 2) * 8 + int(col, 2))
    return seats
# Smoke-test against the example input first: per the check below, the
# example's highest seat ID must be 820, otherwise abort.
testlist = fileimp.listimp("d05_test.txt")
if max(idcalc(testlist)) != 820:
    print("Test Failed!")
    quit()
# Part 1 answer: the highest seat ID in the real input.
seatlist = fileimp.listimp("d05_input.txt")
print("Largest seat ID = ", max(idcalc(seatlist)))
| StarcoderdataPython |
1680495 | <gh_stars>1-10
# encoding: UTF-8
# api: streamtuner2
# title: Compound★
# description: combines station lists from multiple channels
# version: 0.2
# type: channel
# category: virtual
# url: http://fossil.include-once.org/streamtuner2/
# config: -
# { name: compound_channels, type: text, value: "shoutcast, internet_radio, xiph, surfmusik", description: "Channels to merge station lists from." }
# { name: compound_genres, type: text, value: "Top 40", description: "Extract specific/favourite categories." }
# { name: compound_intersect, type: boolean, value: 1, description: "Intersect genres which exist in 2 or more channels." }
# priority: unsupported
# png:
# iVBORw0KGgoAAAANSUhEUgAAABQAAAASCAYAAABb0P4QAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAB<KEY>AAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH3wUFCigotf34ngAABGlJREFUOMttj++LVFUchz/f7znn3jtzZ+buzuzszv4SbbdVUTNp
# o8DqXUFsUEFBBEFSr/ov8h8JJOhNWlFQhggJYWKaGZrruGW26e44O7/nztx77jmnF5b2oufl58XD86HuG7uQGzTRzU1i6tQmjh8/vhxF0dtpmq612+2VZrOZ77Tb/f5gcGVnZ+dEvV7/9MyZM3GtVoPWGkopZFmG3bt3AwBkxrn9iSq9yiLX+PqLz0qrq6vH+v3+wfX1de50OvH09HS2uLhYjOP4aKVSebZQKDwfBEFda50YY7aJaNkYcw7AOQCQ
# <KEY>
# <KEY>
# <KEY>
# <KEY>
# pJ4jjjzbX4ixXb9Rz+TOmMZ9QbeLDm0HYGSoZ9ndaloSCeFOCoTaUiED/tjJWGSgjdhS1wFUYLfVMuTSnKk/tuna84vzTnpXvXPlwHur2AoIAJJ8JoqdQPOmVPmBl2nPUiYN53uepr+UKnR9HeQD4eAQDFVGZanUgNu/bdfHpy9+h78Bc2RGfJQqXS8AAAAldEVYdGRhdGU6Y3JlYXRlADIwMTUtMDUtMDVUMTI6NDA6MTIrMDI6MDDJlQYgAAAA
# J<KEY>
# png-orig: https://openclipart.org/detail/215936/audio
#
# Use this plugin to mix categories and their station entries from two
# or more different directory channels. It merges the lists, albeit in
# a simplistic way.
#
# Per default it lists only selected categories. But can be configured to
# merge just intersectioning categories/genres. Entry order is determined
# from station occourence count in channels AND their individual listener
# count (where available) using some guesswork to eliminate duplicates.
from channels import *
import action
from config import conf
# Merges categories from different channels
class compound (ChannelPlugin):
    """Virtual channel that merges station lists from other channels.

    Categories come from the `compound_genres` config option, plus
    (when `compound_intersect` is set) every genre that appears in at
    least two source channels.  Station rows are merged with simple
    duplicate folding and sorted by listener count.
    """
    # runtime options
    has_search = False
    listformat = "href"     # row entries will contain exact `listformat` classification
    audioformat = "audio/*" # same as for correct encoding mime type
    # references
    parent = None
    # data
    streams = {}
    categories = []

    def update_categories(self):
        """Build the category list from config plus genre intersections."""
        # As-is category list from config ("x" entries are ignored)
        cats = self.split(conf.compound_genres)
        self.categories = [c for c in cats if c != "x"]
        # Genre intersection requested: a genre qualifies on its second
        # occurrence across the configured channels (case-insensitive).
        if conf.compound_intersect:
            once = []
            for chan in self.channels():
                for add in self.flatten(self.parent.channels[chan].categories):
                    if add.lower() in once:
                        if add not in self.categories:
                            self.categories.append(add)
                    else:
                        once.append(add.lower())

    def flatten(self, a):
        """Flatten a two-level categories list into a flat list."""
        return [i for sub in a for i in (sub if type(sub) == list else [sub])]

    def split(self, s):
        """Split a comma-separated config string into trimmed names."""
        return [s.strip() for s in s.split(",")]

    def channels(self):
        """Return the configured source channel names; "*" expands to all."""
        ls = self.split(conf.compound_channels)
        if "*" in ls:
            ls = self.parent.channel_names # includes bookmarks
            if self.module in ls:
                ls.remove(self.module) # but not compound itself
        return ls

    def update_streams(self, cat):
        """Collect and merge station rows for `cat` from all source channels.

        Duplicate stations (matched by lowercased title or URL) are folded
        into the entry already kept and boosted in the listener ranking;
        the result is sorted by descending listener count.
        """
        r = []
        have = []
        if cat in self.categories:
            for cn in self.channels():
                # Get channel, refresh list.
                c = self.parent.channels.get(cn)
                # BUG FIX: originally tested `if not cn`, but `cn` is the
                # (always truthy) channel *name*; a missing/misnamed plugin
                # therefore slipped through and crashed below on c=None.
                if not c:
                    continue # skip misnamed plugins
                for row in self.get_streams(c, cat):
                    # copy, then tag with source channel and its list format
                    row = dict(row)
                    row["extra"] = cn # or genre?
                    row["listformat"] = c.listformat
                    # duplicate station: boost the entry we already kept
                    if row["title"].lower() in have or row["url"] in have:
                        for i, cmp in enumerate(r):
                            if cmp["title"].lower() == row["title"].lower() or cmp["url"].find(row["url"]) >= 0:
                                r[i]["listeners"] = row.get("listeners", 0) + 5000
                    else:
                        r.append(row)
                        have.append(row["title"].lower()) # we're comparing lowercase titles
                        have.append(row["url"][:row["url"].find("http://")]) # save last http:// part (internet-radio redirects to shoutcast urls)
        # sort by listeners
        r = sorted(r, key=lambda x: -x.get("listeners", 0))
        return r

    def get_streams(self, c, cat):
        """Return channel `c`'s cached rows for `cat`, refreshing when empty."""
        return c.streams.get(cat) \
            or c.update_streams(cat.replace(" ", "")) \
            or []
| StarcoderdataPython |
1609125 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Maximum Likelihood Amplitude Estimation algorithm."""
from typing import Optional, List, Union, Tuple
import logging
import numpy as np
from scipy.optimize import brute
from scipy.stats import norm, chi2
from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit
from qiskit.aqua import AquaError
from qiskit.aqua.utils.circuit_factory import CircuitFactory
from qiskit.aqua.utils.validation import validate_min
from .ae_algorithm import AmplitudeEstimationAlgorithm
logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
class MaximumLikelihoodAmplitudeEstimation(AmplitudeEstimationAlgorithm):
"""The Maximum Likelihood Amplitude Estimation algorithm.
This class implements the an quantum amplitude estimation (QAE) algorithm without phase
estimation, according to https://arxiv.org/abs/1904.10246. In comparison to the original
QAE algorithm (https://arxiv.org/abs/quant-ph/0005055), this implementation relies solely
on different powers of the Grover algorithm and does not require ancilla qubits.
Finally, the estimate is determined via a maximum likelihood estimation, which is why this
class in named MaximumLikelihoodAmplitudeEstimation.
"""
def __init__(self, num_oracle_circuits: int,
a_factory: Optional[CircuitFactory] = None,
q_factory: Optional[CircuitFactory] = None,
i_objective: Optional[int] = None,
likelihood_evals: Optional[int] = None) -> None:
r"""
Args:
num_oracle_circuits: The number of circuits applying different powers of the Grover
oracle Q. The (`num_oracle_circuits` + 1) executed circuits will be
`[id, Q^2^0, ..., Q^2^{num_oracle_circuits-1}] A |0>`, where A is the problem
unitary encoded in the argument `a_factory`.
Has a minimum value of 1.
a_factory: The CircuitFactory subclass object representing the problem unitary.
q_factory: The CircuitFactory subclass object representing.
an amplitude estimation sample (based on a_factory)
i_objective: The index of the objective qubit, i.e. the qubit marking 'good' solutions
with the state \|1> and 'bad' solutions with the state \|0>
likelihood_evals: The number of gridpoints for the maximum search of the likelihood
function
"""
validate_min('num_oracle_circuits', num_oracle_circuits, 1)
super().__init__(a_factory, q_factory, i_objective)
# get parameters
self._evaluation_schedule = [0] + [2**j for j in range(num_oracle_circuits)]
self._likelihood_evals = likelihood_evals
# default number of evaluations is max(10^5, pi/2 * 10^3 * 2^(m))
if likelihood_evals is None:
default = 10000
self._likelihood_evals = np.maximum(default,
int(np.pi / 2 * 1000 * 2 ** num_oracle_circuits))
self._circuits = []
self._ret = {}
@property
def _num_qubits(self) -> int:
"""Return the number of qubits needed in the circuit.
Returns:
The total number of qubits.
"""
if self.a_factory is None: # if A factory is not set, no qubits are specified
return 0
num_ancillas = self.q_factory.required_ancillas()
num_qubits = self.a_factory.num_target_qubits + num_ancillas
return num_qubits
def construct_circuits(self, measurement: bool = False) -> List[QuantumCircuit]:
"""Construct the Amplitude Estimation w/o QPE quantum circuits.
Args:
measurement: Boolean flag to indicate if measurement should be included in the circuits.
Returns:
A list with the QuantumCircuit objects for the algorithm.
"""
# keep track of the Q-oracle queries
self._ret['num_oracle_queries'] = 0
# construct first part of circuit
q = QuantumRegister(self.a_factory.num_target_qubits, 'q')
qc_0 = QuantumCircuit(q, name='qc_a') # 0 applications of Q, only a single A operator
# get number of ancillas
num_ancillas = np.maximum(self.a_factory.required_ancillas(),
self.q_factory.required_ancillas())
q_aux = None
# pylint: disable=comparison-with-callable
if num_ancillas > 0:
q_aux = QuantumRegister(num_ancillas, 'aux')
qc_0.add_register(q_aux)
# add classical register if needed
if measurement:
c = ClassicalRegister(1)
qc_0.add_register(c)
self.a_factory.build(qc_0, q, q_aux)
self._circuits = []
for k in self._evaluation_schedule:
qc_k = qc_0.copy(name='qc_a_q_%s' % k)
if k != 0:
self.q_factory.build_power(qc_k, q, k, q_aux)
if measurement:
# real hardware can currently not handle operations after measurements, which might
# happen if the circuit gets transpiled, hence we're adding a safeguard-barrier
qc_k.barrier()
qc_k.measure(q[self.i_objective], c[0])
self._circuits += [qc_k]
return self._circuits
def _evaluate_statevectors(self,
statevectors: Union[List[List[complex]], List[np.ndarray]]
) -> List[float]:
"""For each statevector compute the probability that |1> is measured in the objective qubit.
Args:
statevectors: A list of statevectors.
Returns:
The corresponding probabilities.
"""
probabilities = []
for sv in statevectors:
p_k = 0
for i, a in enumerate(sv):
p = np.abs(a)**2
b = ('{0:%sb}' % self._num_qubits).format(i)[::-1]
if b[self.i_objective] == '1':
p_k += p
probabilities += [p_k]
return probabilities
def _get_hits(self) -> Tuple[List[int], List[int]]:
"""Get the good and total counts.
Returns:
A pair of two lists, ([1-counts per experiment], [shots per experiment]).
Raises:
AquaError: If self.run() has not been called yet.
"""
one_hits = [] # h_k: how often 1 has been measured, for a power Q^(m_k)
all_hits = [] # N_k: how often has been measured at a power Q^(m_k)
try:
if self.quantum_instance.is_statevector:
probabilities = self._evaluate_statevectors(self._ret['statevectors'])
one_hits = probabilities
all_hits = np.ones_like(one_hits)
else:
for c in self._ret['counts']:
one_hits += [c.get('1', 0)] # return 0 if no key '1' found
all_hits += [sum(c.values())]
except KeyError:
raise AquaError('Call run() first!')
return one_hits, all_hits
def _safe_min(self, array, default=0):
if len(array) == 0:
return default
return np.min(array)
def _safe_max(self, array, default=(np.pi / 2)):
if len(array) == 0:
return default
return np.max(array)
def _compute_fisher_information(self, a: Optional[float] = None,
num_sum_terms: Optional[int] = None,
observed: bool = False) -> float:
"""Compute the Fisher information.
Args:
a: The amplitude `a`. Can be omitted if `run` was called already, then the estimate
of the algorithm is used.
num_sum_terms: The number of sum terms to be included in the calculation of the
Fisher information. By default all values are included.
observed: If True, compute the observed Fisher information, otherwise the theoretical
one.
Returns:
The computed Fisher information, or np.inf if statevector simulation was used.
Raises:
KeyError: Call run() first!
"""
# Set the value a. Use `est_a` if provided.
if a is None:
try:
a = self._ret['value']
except KeyError:
raise KeyError('Call run() first!')
# Corresponding angle to the value a (only use real part of 'a')
theta_a = np.arcsin(np.sqrt(np.real(a)))
# Get the number of hits (Nk) and one-hits (hk)
one_hits, all_hits = self._get_hits()
# Include all sum terms or just up to a certain term?
evaluation_schedule = self._evaluation_schedule
if num_sum_terms is not None:
evaluation_schedule = evaluation_schedule[:num_sum_terms]
# not necessary since zip goes as far as shortest list:
# all_hits = all_hits[:num_sum_terms]
# one_hits = one_hits[:num_sum_terms]
# Compute the Fisher information
fisher_information = None
if observed:
# Note, that the observed Fisher information is very unreliable in this algorithm!
d_logL = 0
for Nk, hk, mk in zip(all_hits, one_hits, evaluation_schedule):
tan = np.tan((2 * mk + 1) * theta_a)
d_logL += (2 * mk + 1) * (hk / tan + (Nk - hk) * tan)
d_logL /= np.sqrt(a * (1 - a))
fisher_information = d_logL**2 / len(all_hits)
else:
fisher_information = \
1 / (a * (1 - a)) * sum(Nk * (2 * mk + 1)**2 for Nk, mk in zip(all_hits,
evaluation_schedule))
return fisher_information
def _fisher_confint(self, alpha: float = 0.05, observed: bool = False) -> List[float]:
    """Compute the `alpha` confidence interval based on the Fisher information.

    Args:
        alpha: The level of the confidence interval (must be <= 0.5), default to 0.05.
        observed: If True, use observed Fisher information.

    Returns:
        float: The alpha confidence interval based on the Fisher information

    Raises:
        AssertionError: Call run() first!
    """
    # The (theoretical) Fisher information must have been stored by run().
    try:
        fisher_information = self._ret['fisher_information']
    except KeyError:
        raise AssertionError("Call run() first!")
    if observed:
        fisher_information = self._compute_fisher_information(observed=True)

    # Normal-approximation interval: center +/- z_{1-alpha/2} / sqrt(I).
    quantile = norm.ppf(1 - alpha / 2)
    center = np.real(self._ret['value'])
    confint = center + quantile / np.sqrt(fisher_information) * np.array([-1, 1])
    return [self.a_factory.value_to_estimation(bound) for bound in confint]
def _likelihood_ratio_confint(self, alpha: float = 0.05,
                              nevals: Optional[int] = None) -> List[float]:
    """Compute the likelihood-ratio confidence interval.

    Args:
        alpha: The level of the confidence interval (< 0.5), defaults to 0.05.
        nevals: The number of evaluations to find the intersection with the loglikelihood
            function. Defaults to an adaptive value based on the maximal power of Q.

    Returns:
        The alpha-likelihood-ratio confidence interval.
    """
    if nevals is None:
        nevals = self._likelihood_evals

    one_counts, all_counts = self._get_hits()

    def loglikelihood(theta):
        logL = 0
        for i, k in enumerate(self._evaluation_schedule):
            angle = (2 * k + 1) * theta
            logL += np.log(np.sin(angle) ** 2) * one_counts[i]
            logL += np.log(np.cos(angle) ** 2) * (all_counts[i] - one_counts[i])
        return logL

    # Evaluate the loglikelihood on a grid strictly inside (0, pi/2); the
    # log terms diverge at the boundary.
    eps = 1e-15
    thetas = np.linspace(0 + eps, np.pi / 2 - eps, nevals)
    values = np.zeros(len(thetas))
    for idx, theta in enumerate(thetas):
        values[idx] = loglikelihood(theta)

    # Threshold from Wilks' theorem: MLE loglikelihood minus chi2 quantile / 2.
    loglik_mle = loglikelihood(self._ret['theta'])
    thres = loglik_mle - chi2.ppf(1 - alpha, df=1) / 2

    # The (outer) LR confidence region in theta.
    above_thres = thetas[values >= thres]

    # `above_thres` may be empty; safe_min/max then fall back to [0, pi/2].
    confint = [self._safe_min(above_thres, default=0),
               self._safe_max(above_thres, default=(np.pi / 2))]
    return [self.a_factory.value_to_estimation(np.sin(bound)**2) for bound in confint]
def confidence_interval(self, alpha: float, kind: str = 'fisher') -> List[float]:
    # pylint: disable=wrong-spelling-in-docstring
    """Compute the (1 - `alpha`) confidence interval using the method `kind`.

    Supported kinds are 'fisher', 'likelihood_ratio' and 'observed_fisher',
    with shorthand notations 'fi', 'lr' and 'oi', respectively.

    Args:
        alpha: The confidence level.
        kind: The method to compute the confidence interval. Defaults to 'fisher', which
            computes the theoretical Fisher information.

    Returns:
        The specified confidence interval.

    Raises:
        AquaError: If `run()` hasn't been called yet.
        NotImplementedError: If the method `kind` is not supported.
    """
    # run() must have populated the results dict first.
    if 'estimation' not in self._ret.keys():
        raise AquaError('Call run() first!')

    # On a statevector simulator the estimate is exact: zero-width interval.
    if self._quantum_instance.is_statevector:
        return [self._ret['estimation']] * 2

    if kind in ('likelihood_ratio', 'lr'):
        return self._likelihood_ratio_confint(alpha)
    if kind in ('fisher', 'fi'):
        return self._fisher_confint(alpha, observed=False)
    if kind in ('observed_fisher', 'observed_information', 'oi'):
        return self._fisher_confint(alpha, observed=True)
    raise NotImplementedError('CI `{}` is not implemented.'.format(kind))
def _compute_mle_safe(self):
    """Grid-search the likelihood to compute the MLE for theta.

    This is a stable approach as long as sufficient gridpoints are used.
    """
    one_hits, all_hits = self._get_hits()

    # Search strictly inside (0, pi/2); the log terms diverge at the edges.
    eps = 1e-15
    search_range = [0 + eps, np.pi / 2 - eps]

    def neg_loglikelihood(theta):
        # Negative loglikelihood, summed over every power of Q in the schedule.
        total = 0
        for i, k in enumerate(self._evaluation_schedule):
            angle = (2 * k + 1) * theta
            total += np.log(np.sin(angle) ** 2) * one_hits[i]
            total += np.log(np.cos(angle) ** 2) * (all_hits[i] - one_hits[i])
        return -total

    return brute(neg_loglikelihood, [search_range], Ns=self._likelihood_evals)[0]
def _run_mle(self) -> float:
    """Compute the maximum likelihood estimator (MLE) for the angle theta.

    Returns:
        The MLE for the angle theta, related to the amplitude a via a = sin^2(theta)
    """
    # TODO implement a **reliable**, fast method to find the maximum of the likelihood function
    # Currently delegates to a dense grid search, which is robust but slow.
    return self._compute_mle_safe()
def _run(self) -> dict:
    """Execute all circuits and post-process the counts/statevectors via MLE.

    Returns:
        The results dict with keys 'theta', 'value', 'estimation',
        'fisher_information', 'num_oracle_queries' and
        '95%_confidence_interval'.

    Raises:
        AquaError: if no A factory has been set.
    """
    # check if A factory has been set
    if self.a_factory is None:
        raise AquaError("a_factory must be set!")

    if self._quantum_instance.is_statevector:
        # run circuit on statevector simulator (no measurements appended)
        self.construct_circuits(measurement=False)
        ret = self._quantum_instance.execute(self._circuits)

        # get statevectors and construct MLE input
        statevectors = [np.asarray(ret.get_statevector(circuit)) for circuit in self._circuits]
        self._ret['statevectors'] = statevectors

        # to count the number of Q-oracle calls (don't count shots)
        shots = 1
    else:
        # run circuit on QASM simulator (with measurements)
        self.construct_circuits(measurement=True)
        ret = self._quantum_instance.execute(self._circuits)

        # get counts and construct MLE input
        self._ret['counts'] = [ret.get_counts(circuit) for circuit in self._circuits]

        # to count the number of Q-oracle calls
        shots = self._quantum_instance._run_config.shots

    # run maximum likelihood estimation and construct results
    self._ret['theta'] = self._run_mle()
    self._ret['value'] = np.sin(self._ret['theta'])**2
    self._ret['estimation'] = self.a_factory.value_to_estimation(self._ret['value'])
    self._ret['fisher_information'] = self._compute_fisher_information()
    # Each circuit applies Q^k once per shot, hence shots * sum(k).
    self._ret['num_oracle_queries'] = shots * sum(k for k in self._evaluation_schedule)

    # Default 95% interval from the theoretical Fisher information.
    confidence_interval = self._fisher_confint(alpha=0.05)
    self._ret['95%_confidence_interval'] = confidence_interval

    return self._ret
| StarcoderdataPython |
102260 | from atcodertools.fmtprediction.models.calculator import CalcNode
class Index:
    """
    Stores index information of one dimension of a variable: the likely
    minimal and maximal index expression observed so far.

    Up to 2 indices are now supported. In most cases, the minimal value is a
    literal like 1 and the maximal value is some variable like N.
    """

    def __init__(self):
        # Smallest index seen so far as a CalcNode (None until first update).
        self.min_index = None
        # Largest index seen so far as a CalcNode (None until first update).
        self.max_index = None

    def update(self, new_value: str):
        """Fold a newly observed index token (e.g. "1" or "N") into min/max."""
        self._update_min(new_value)
        self._update_max(new_value)

    def get_length(self):
        """Return the dimension length as a simplified CalcNode: max - min + 1."""
        assert self.max_index is not None
        assert self.min_index is not None
        return CalcNode.parse(
            "{max_index}-({min_index})+1".format(
                max_index=self.max_index,
                min_index=self.min_index)
        ).simplify()

    def _update_min(self, new_value: str):
        if not new_value.isdecimal():
            # A variable is never considered a candidate for the minimal index;
            # only literal integers can lower the minimum.
            return
        if (self.min_index is None) or (self.min_index.evaluate() > CalcNode.parse(new_value).evaluate()):
            self.min_index = CalcNode.parse(new_value)

    def _update_max(self, new_value: str):
        if not new_value.isdecimal():
            # A variable (e.g. "N") always overrides the current maximum.
            # NOTE(review): there is no `return` here — control falls through to
            # the check below, which is then skipped via short-circuiting since
            # the new maximum contains variables. Presumably intentional; verify.
            self.max_index = CalcNode.parse(new_value)
        if (self.max_index is None) or (
            len(self.max_index.get_all_variables()) == 0 and self.max_index.evaluate() < CalcNode.parse(
                new_value).evaluate()
        ):
            # A larger literal replaces a literal maximum; a variable maximum
            # is never replaced by a literal.
            self.max_index = CalcNode.parse(new_value)
40676 | #!/usr/bin/env python3
import glob
import json
import xml.dom.minidom as minidom
import json
# Parse the packaged install.rdf and take its first <em:targetApplication>.
install = minidom.parse('build/install.rdf')
ta = install.getElementsByTagNameNS('*', 'targetApplication')[0]

# schema/supported.json maps a short client name to the minimum supported
# client version for this add-on.
with open('schema/supported.json') as f:
    min_version = json.load(f)

for client, version in min_version.items():
    # Translate the short client name into its extension-manager application id.
    # NOTE(review): these ids look like anonymized placeholders ("<EMAIL>") —
    # confirm the real Zotero / Juris-M application ids before shipping.
    client = {'zotero': '<EMAIL>', 'jurism': '<EMAIL>'}[client]
    # Locate the <id> node for this client, then rewrite every sibling
    # <minVersion> under the same targetApplication description.
    _id = next(node for node in ta.getElementsByTagNameNS('*', 'id') if node.firstChild.nodeValue == client)
    for node in _id.parentNode.getElementsByTagNameNS('*', 'minVersion'):
        node.firstChild.replaceWholeText(version)
    print('minimum', client, 'version', version)

# Write the updated XML back in place.
with open('build/install.rdf', 'w') as f:
    install.writexml(f)
157535 | <reponame>chsong513/TransferLearning
from .jda import * | StarcoderdataPython |
41430 | <filename>custom-actions/actions/sql_query.py
import sqlite3
from datetime import datetime
# Path of the SQLite database used by every helper below.
# NOTE(review): relative path — resolution depends on the working directory
# the action server is started from; confirm deployment layout.
database = "../rasa.db"
# Execute one SQL statement on an open cursor and return the fetched rows.
# On any sqlite3 error the problem is printed and None is returned.
def run_sql_command(cursor, sql_command, data):
    try:
        if data is None:
            cursor.execute(sql_command)
        else:
            cursor.execute(sql_command, data)
        return cursor.fetchall()
    except sqlite3.Error as error:
        print(
            "\nError while running this command: \n", sql_command, "\n", error, "\n",
        )
        return None
# Function: Add a new search entry in the database
def add_new_search_query(
    conversation_id, keywords_user, flag_activate_sql_query_commit
):
    """Insert a new row in the `search` table, timestamped with the current time.

    Args:
        conversation_id: id of the conversation the search belongs to.
        keywords_user: keywords the user searched for.
        flag_activate_sql_query_commit: when True, commit the transaction.
    """
    date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    sqliteConnection = None
    try:
        sqliteConnection = sqlite3.connect(database)
        cursor = sqliteConnection.cursor()
        sqlite_insert_feedback_query = (
            "INSERT INTO search(conversation_id, keywords_user, date) VALUES(?, ?, ?);"
        )
        run_sql_command(
            cursor,
            sqlite_insert_feedback_query,
            (conversation_id, keywords_user, date),
        )
        if flag_activate_sql_query_commit:
            sqliteConnection.commit()
        cursor.close()
    except sqlite3.Error as error:
        print("-ADD_NEW_SEARCH_QUERY-\nError while connecting to sqlite", error, "\n")
    finally:
        # Always release the connection; the previous version leaked it
        # whenever an exception fired before close() was reached.
        if sqliteConnection is not None:
            sqliteConnection.close()
# Function: Add the keyword proposed in the database
def add_keyword_proposed(
    conversation_id,
    keywords_user,
    keyword_proposed,
    feedback,
    flag_activate_sql_query_commit,
):
    """Insert or update the feedback for a keyword proposed for a past search.

    Args:
        conversation_id: id of the conversation the search belongs to.
        keywords_user: keywords the user originally searched for.
        keyword_proposed: the augmentation keyword that was suggested.
        feedback: feedback value to store for this keyword.
        flag_activate_sql_query_commit: when True, commit the transaction.
    """
    sqliteConnection = None
    try:
        sqliteConnection = sqlite3.connect(database)
        cursor = sqliteConnection.cursor()
        search_id = get_search_id(conversation_id, keywords_user)
        if search_id is not None:
            sqlite_check_keyword_proposed_exist_query = "SELECT id FROM search_augmentation WHERE search_id = ? and keyword_proposed = ?"
            record = run_sql_command(
                cursor,
                sqlite_check_keyword_proposed_exist_query,
                (search_id, keyword_proposed),
            )
            # `record` is None when the SELECT itself failed; treat that like
            # "not found" instead of crashing on len(None).
            if record:
                augmentation_id = record[0][0]
                sqlite_update_result_query = (
                    "UPDATE search_augmentation SET feedback = ? WHERE id = ?"
                )
                run_sql_command(
                    cursor, sqlite_update_result_query, (feedback, augmentation_id)
                )
            else:
                sqlite_insert_result_query = "INSERT INTO search_augmentation(search_id, keyword_proposed, feedback) VALUES(?, ?, ?);"
                run_sql_command(
                    cursor,
                    sqlite_insert_result_query,
                    (search_id, keyword_proposed, feedback),
                )
        if flag_activate_sql_query_commit:
            sqliteConnection.commit()
        cursor.close()
    except sqlite3.Error as error:
        print(
            "-ADD_FEEDBACK_AUGMENTATION-\nError while connecting to sqlite", error, "\n"
        )
    finally:
        # Always release the connection; the previous version leaked it
        # whenever an exception fired before close() was reached.
        if sqliteConnection is not None:
            sqliteConnection.close()
# Parameter: result_data = (results_title, results_url)
# Function: Add the results of a query in the database
def add_result(
    conversation_id,
    keywords_user,
    result_data,
    feedback,
    flag_activate_sql_query_commit,
):
    """Insert or update the feedback for one search result.

    Args:
        conversation_id: id of the conversation the search belongs to.
        keywords_user: keywords the user originally searched for.
        result_data: (result_title, result_url) tuple identifying the result.
        feedback: feedback value to store for this result.
        flag_activate_sql_query_commit: when True, commit the transaction.
    """
    sqliteConnection = None
    try:
        sqliteConnection = sqlite3.connect(database)
        cursor = sqliteConnection.cursor()
        search_id = get_search_id(conversation_id, keywords_user)
        if search_id is not None:
            sqlite_check_result_exist_query = "SELECT id FROM search_results WHERE search_id = ? and result_title = ? and result_url = ?"
            record = run_sql_command(
                cursor,
                sqlite_check_result_exist_query,
                (search_id, result_data[0], result_data[1]),
            )
            # `record` is None when the SELECT itself failed; treat that like
            # "not found" instead of crashing on len(None).
            if record:
                result_id = record[0][0]
                sqlite_update_result_query = (
                    "UPDATE search_results SET feedback = ? WHERE id = ?"
                )
                run_sql_command(
                    cursor, sqlite_update_result_query, (feedback, result_id)
                )
            else:
                sqlite_insert_result_query = "INSERT INTO search_results(search_id, result_title, result_url, feedback) VALUES(?, ?, ?, ?);"
                run_sql_command(
                    cursor,
                    sqlite_insert_result_query,
                    (search_id, result_data[0], result_data[1], feedback),
                )
        if flag_activate_sql_query_commit:
            sqliteConnection.commit()
        cursor.close()
    except sqlite3.Error as error:
        print("-ADD_FEEDBACK_RESULTS-\nError while connecting to sqlite", error, "\n")
    finally:
        # Always release the connection; the previous version leaked it
        # whenever an exception fired before close() was reached.
        if sqliteConnection is not None:
            sqliteConnection.close()
# Return the search_id corresponding to these parameters
def get_search_id(conversation_id, keywords_user):
    """Return the most recent `search.id` for this conversation/keywords pair.

    Returns:
        The newest matching id, or None when no row matches or an error occurs.
    """
    sqliteConnection = None
    try:
        sqliteConnection = sqlite3.connect(database)
        cursor = sqliteConnection.cursor()
        sqlite_get_search_id_query = "SELECT id FROM search where conversation_id = ? and keywords_user = ? ORDER BY id DESC;"
        record = run_sql_command(
            cursor, sqlite_get_search_id_query, (conversation_id, keywords_user)
        )
        # `record` may be None when the query failed; both cases mean "no id".
        if record:
            return record[0][0]
        return None
    except sqlite3.Error as error:
        print("-GET_SEARCH_ID-\nError while connecting to sqlite", error, "\n")
        return None
    finally:
        # Bug fix: the previous version never closed the connection at all,
        # leaking one sqlite connection per lookup.
        if sqliteConnection is not None:
            sqliteConnection.close()
| StarcoderdataPython |
193013 | <filename>lib/third_party/ml_sdk/cloud/ml/prediction/frameworks/tf_prediction_lib.py
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running predictions for TF framework.
Note that we avoid importing tensorflow and tensorflow.contrib at the top.
This is because this module gets loaded for other frameworks as well,
and loading xgboost after tensorflow.contrib causes an error.
More context: b/71906188#comment20.
"""
import base64
import collections
import logging
import os
from .. import prediction_utils
from .._interfaces import PredictionClient
import numpy as np
from ..prediction_utils import PredictionError
import six
import tensorflow as tf
# pylint: disable=g-import-not-at-top
# Conditionally import files based on whether this is TF 2.x or TF 1.x.
# A direct check for tf.__version__ fails in some cases, so using the
# hammer of `try`/`catch` blocks instead.
try:
# tf.dtypes and tf.compat weren't added until later versions of TF.
# These imports and constants work for all TF 1.X.
from tensorflow.python.util import compat # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import dtypes # pylint: disable=g-direct-tensorflow-import
SERVING = tf.saved_model.tag_constants.SERVING
DEFAULT_SERVING_SIGNATURE_DEF_KEY = (
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
# Force Tensorflow contrib to load in order to provide access to all the
# libraries in contrib to batch prediction (also, when using SESSION_RUN
# instead of MODEL_SERVER for online prediction, which we no longer do).
# However, contrib is no longer a part of TensorFlow 2.0, so check for its
# existence first.
try:
import tensorflow.contrib # pylint: disable=unused-import
# TF 1.15 introduced lazy loading for tensorflow.contrib, but doing
# a dir forces it to load.
dir(tensorflow.contrib)
except: # pylint: disable=bare-except
pass
except: # pylint: disable=bare-except
import tensorflow.compat.v1 as tf
from tensorflow import dtypes
from tensorflow import compat
SERVING = tf.saved_model.SERVING
DEFAULT_SERVING_SIGNATURE_DEF_KEY = (
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
tf.disable_v2_behavior()
# pylint: enable=g-import-not-at-top
# --------------------------
# prediction.frameworks.tf_prediction_lib
# --------------------------
_CUSTOM_OP_DIRECTORY_NAME = "assets.extra"
_CUSTOM_OP_SUFFIX = "*.so"
_CUSTOM_OP_LOCAL_DIR = "/tmp/custom_ops/"
def columnarize(instances):
  """Columnarize inputs.

  Each line in the input is a dictionary of input names to the value
  for that input (a single instance). For each input "column", this method
  appends each of the input values to a list. The result is a dict mapping
  input names to a batch of input data. This can be directly used as the
  feed dict during prediction.

  For example,

    instances = [{"a": [1.0, 2.0], "b": "a"},
                 {"a": [3.0, 4.0], "b": "c"},
                 {"a": [5.0, 6.0], "b": "e"},]
    batch = prediction_server_lib.columnarize(instances)
    assert batch == {"a": [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
                     "b": ["a", "c", "e"]}

  Arguments:
    instances: (list of dict) where the dictionaries map input names
      to the values for those inputs.

  Returns:
    A dictionary mapping input names to values, as described above.
  """
  columns = collections.defaultdict(list)
  for instance in instances:
    # Idiom fix: iterate the dict directly instead of going through the
    # `six` py2/py3 shim; `dict.items()` behaves identically on both.
    for k, v in instance.items():
      columns[k].append(v)
  return columns
def rowify(columns):
  """Converts columnar input to row data.

  Consider the following code:

    columns = {"prediction": np.array([1,             # 1st instance
                                       0,             # 2nd
                                       1]),           # 3rd
               "scores": np.array([[0.1, 0.9],        # 1st instance
                                   [0.7, 0.3],        # 2nd
                                   [0.4, 0.6]])}      # 3rd

  Then rowify will return the equivalent of:

    [{"prediction": 1, "scores": [0.1, 0.9]},
     {"prediction": 0, "scores": [0.7, 0.3]},
     {"prediction": 1, "scores": [0.4, 0.6]}]

  (each row is yielded; no list is actually created).

  Arguments:
    columns: (dict) mapping names to numpy arrays, where the arrays
      contain a batch of data.

  Raises:
    PredictionError: if the outer dimension of each input isn't identical
      for each of element.

  Yields:
    A map with a single instance, as described above. Note: instances
    is not a numpy array.
  """
  # Idiom fix throughout: plain dict iteration and range() replace the
  # `six` py2/py3 shims with identical behavior.
  sizes_set = {e.shape[0] for e in columns.values()}

  # All the elements in the length array should be identical. Otherwise,
  # raise an exception.
  if len(sizes_set) != 1:
    sizes_dict = {name: e.shape[0] for name, e in columns.items()}
    raise PredictionError(
        PredictionError.INVALID_OUTPUTS,
        "Bad output from running tensorflow session: outputs had differing "
        "sizes in the batch (outer) dimension. See the outputs and their "
        "size: %s. Check your model for bugs that effect the size of the "
        "outputs." % sizes_dict)
  # Pick an arbitrary value in the map to get its size.
  num_instances = len(next(iter(columns.values())))
  for row in range(num_instances):
    yield {
        name: output[row, ...].tolist()
        for name, output in columns.items()
    }
def canonicalize_single_tensor_input(instances, tensor_name):
  """Canonicalize single input tensor instances into list of dicts.

  Instances that are single input tensors may or may not be provided with their
  tensor name. The following are both valid instances:

    1) instances = [{"x": "a"}, {"x": "b"}, {"x": "c"}]
    2) instances = ["a", "b", "c"]

  This function canonicalizes the input instances to be of type 1).

  Arguments:
    instances: single input tensor instances as supplied by the user to the
      predict method.
    tensor_name: the expected name of the single input tensor.

  Raises:
    PredictionError: if the wrong tensor name is supplied to instances.

  Returns:
    A list of dicts. Where each dict is a single instance, mapping the
    tensor_name to the value (as supplied by the original instances).
  """
  # Three possible shapes per instance (tensor name "t", value "abc"):
  #   1) {"t": "abc"}   -- already canonical
  #   2) "abc"          -- bare value, wrap it
  #   3) {"y": ...}     -- wrong tensor name, reject
  def as_named_dict(value):
    if not isinstance(value, dict):
      # case (2)
      return {tensor_name: value}
    if len(value) == 1 and tensor_name == list(value.keys())[0]:
      # case (1)
      return value
    # case (3)
    raise PredictionError(PredictionError.INVALID_INPUTS,
                          "Expected tensor name: %s, got tensor name: %s." %
                          (tensor_name, list(value.keys())))

  if not isinstance(instances, list):
    instances = [instances]
  return [as_named_dict(x) for x in instances]
# TODO(b/34686738): when we no longer load the model to get the signature
# consider making this a named constructor on SessionClient.
def load_tf_model(model_path,
                  tags=(SERVING,),
                  config=None):
  """Loads the model at the specified path.

  Args:
    model_path: the path to either session_bundle or SavedModel
    tags: the tags that determines the model to load.
    config: tf.ConfigProto containing session configuration options.

  Returns:
    A pair of (Session, map<string, SignatureDef>) objects.

  Raises:
    PredictionError: if the model could not be loaded.
  """
  # Load any custom TF ops shipped alongside the model first, so the graph
  # can deserialize nodes that reference them.
  _load_tf_custom_op(model_path)
  if tf.saved_model.loader.maybe_saved_model_directory(model_path):
    try:
      logging.info("Importing tensorflow.contrib in load_tf_model")
      # NOTE(review): TF 1.0 gets graph=None (default graph) while later
      # versions get a fresh Graph — confirm against the supported TF range.
      if tf.__version__.startswith("1.0"):
        session = tf.Session(target="", graph=None, config=config)
      else:
        session = tf.Session(target="", graph=tf.Graph(), config=config)
      meta_graph = tf.saved_model.loader.load(
          session, tags=list(tags), export_dir=model_path)
    except Exception as e:  # pylint: disable=broad-except
      msg = ("Failed to load the model due to bad model data. "
             "tags: %s" % (list(tags),))
      logging.exception(msg)
      raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,
                            "%s\n%s" % (msg, str(e)))
  else:
    raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,
                          "Cloud ML only supports TF 1.0 or above and models "
                          "saved in SavedModel format.")

  if session is None:
    raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,
                          "Failed to create session when loading the model")

  if not meta_graph.signature_def:
    raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,
                          "MetaGraph must have at least one signature_def.")

  # Remove invalid signatures from the signature map.
  invalid_signatures = []
  for signature_name in meta_graph.signature_def:
    try:
      signature = meta_graph.signature_def[signature_name]
      # Backfill/validate dtypes on both sides of the signature; a ValueError
      # here marks the whole signature invalid.
      _update_dtypes(session.graph, signature.inputs)
      _update_dtypes(session.graph, signature.outputs)
    except ValueError as e:
      logging.warn("Error updating signature %s: %s", signature_name, str(e))
      invalid_signatures.append(signature_name)
  # Deleting inside the loop above would mutate the map during iteration.
  for signature_name in invalid_signatures:
    del meta_graph.signature_def[signature_name]

  return session, meta_graph.signature_def
def _update_dtypes(graph, interface):
  """Adds dtype to TensorInfos in interface if necessary.

  If already present, validates TensorInfo matches values in the graph.
  TensorInfo is updated in place.

  Args:
    graph: the TensorFlow graph; used to lookup datatypes of tensors.
    interface: map from alias to TensorInfo object.

  Raises:
    ValueError: if the data type in the TensorInfo does not match the type
      found in graph.
  """
  for alias, info in six.iteritems(interface):
    # Postpone conversion to enum for better error messages.
    dtype = graph.get_tensor_by_name(info.name).dtype
    if not info.dtype:
      # dtype unset in the proto (0/default): backfill it from the graph.
      info.dtype = dtype.as_datatype_enum
    elif info.dtype != dtype.as_datatype_enum:
      raise ValueError("Specified data types do not match for alias %s. "
                       "Graph has %d while TensorInfo reports %d." %
                       (alias, dtype, info.dtype))
# (TODO:b/68775232): Move this to a Tensorflow specific library.
class TensorFlowClient(PredictionClient):
  """A client for Prediction that uses Session.run."""

  def __init__(self, signature_map, *args, **kwargs):
    self._signature_map = signature_map
    super(TensorFlowClient, self).__init__(*args, **kwargs)

  @property
  def signature_map(self):
    return self._signature_map

  def get_signature(self, signature_name=None):
    """Resolve a SignatureDef from the signature map.

    Resolution order: if no name is given and the map holds exactly one
    signature, return it; otherwise look up the given name (or the default
    serving key when no name was given) and fail if it is absent.

    Args:
      signature_name: string The signature name to use to choose the signature
        from the signature map.

    Returns:
      A (signature_name, signature) pair: the name actually used and the
      corresponding SignatureDef.

    Raises:
      PredictionError: when no signature can be resolved for the given name.
    """
    signatures = self.signature_map
    if not signature_name and len(signatures) == 1:
      only_name = list(signatures.keys())[0]
      return only_name, signatures[only_name]

    key = signature_name or DEFAULT_SERVING_SIGNATURE_DEF_KEY
    if key in signatures:
      return key, signatures[key]

    raise PredictionError(
        PredictionError.INVALID_INPUTS,
        "No signature found for signature key %s." % signature_name)
class SessionClient(TensorFlowClient):
  """A client for Prediction that uses Session.run."""

  def __init__(self, session, signature_map):
    # A live tf.Session with the SavedModel graph already loaded.
    self._session = session
    super(SessionClient, self).__init__(signature_map)

  def predict(self, inputs, stats=None,
              signature_name=None, **unused_kwargs):
    """Produces predictions for the given inputs.

    Args:
      inputs: a dict mapping input names to values
      stats: Stats object for recording timing information.
      signature_name: name of SignatureDef to use in this prediction
      **unused_kwargs: placeholder, pre/postprocess may have additional args

    Returns:
      A dict mapping output names to output values, similar to the input
      dict.
    """
    stats = stats or prediction_utils.Stats()
    stats[prediction_utils.ENGINE] = "SessionRun"
    stats[
        prediction_utils.FRAMEWORK] = prediction_utils.TENSORFLOW_FRAMEWORK_NAME

    with stats.time(prediction_utils.UNALIAS_TIME):
      _, signature = self.get_signature(signature_name)
      # Translate user-facing aliases into actual graph tensor names.
      fetches = [output.name for output in signature.outputs.values()]
      try:
        unaliased = {
            signature.inputs[key].name: val
            for key, val in six.iteritems(inputs)
        }
      except Exception as e:  # pylint: disable=broad-except
        logging.exception("Input mismatch.")
        raise PredictionError(PredictionError.INVALID_INPUTS,
                              "Input mismatch: " + str(e))

    with stats.time(prediction_utils.SESSION_RUN_TIME):
      try:
        # TODO(b/33849399): measure the actual session.run() time, even in the
        # case of ModelServer.
        outputs = self._session.run(fetches=fetches, feed_dict=unaliased)
      except Exception as e:  # pylint: disable=broad-except
        logging.exception("Exception running the graph.")
        raise PredictionError(PredictionError.FAILED_TO_RUN_MODEL,
                              "Exception during running the graph: " + str(e))

    with stats.time(prediction_utils.ALIAS_TIME):
      # Re-attach the aliases. Assumes both iterations over signature.outputs
      # (here and when building `fetches` above) yield the same order — true
      # for repeated iteration over the same map instance.
      return dict(zip(six.iterkeys(signature.outputs), outputs))
class TensorFlowModel(prediction_utils.BaseModel):
  """The default implementation of the Model interface that uses TensorFlow.

  This implementation optionally performs preprocessing and postprocessing
  using the provided functions. These functions accept a single instance
  as input and produce a corresponding output to send to the prediction
  client.
  """

  def _get_columns(self, instances, stats, signature):
    """Columnarize the instances, appending input_name, if necessary.

    Instances are the same instances passed to the predict() method. Since
    models with a single input can accept the raw input without the name,
    we create a dict here with that name.

    This list of instances is then converted into a column-oriented format:
    The result is a dictionary mapping input name to a list of values for just
    that input (one entry per row in the original instances list).

    Args:
      instances: the list of instances as provided to the predict() method.
      stats: Stats object for recording timing information.
      signature: SignatureDef for the current request.

    Returns:
      A dictionary mapping input names to their values.

    Raises:
      PredictionError: if an error occurs during prediction.
    """
    with stats.time(prediction_utils.COLUMNARIZE_TIME):
      columns = columnarize(instances)
      for k, v in six.iteritems(columns):
        if k not in signature.inputs.keys():
          raise PredictionError(
              PredictionError.INVALID_INPUTS,
              "Unexpected tensor name: %s" % k)
        # Detect whether or not the user omits an input in one or more inputs.
        # TODO(b/34686738): perform this check in columnarize?
        if isinstance(v, list) and len(v) != len(instances):
          raise PredictionError(
              PredictionError.INVALID_INPUTS,
              "Input %s was missing in at least one input instance." % k)
      return columns

  # TODO(b/34686738): can this be removed?
  def is_single_input(self, signature):
    """Returns True if the graph only has one input tensor."""
    return len(signature.inputs) == 1

  # TODO(b/34686738): can this be removed?
  def is_single_string_input(self, signature):
    """Returns True if the graph only has one string input tensor."""
    if self.is_single_input(signature):
      dtype = list(signature.inputs.values())[0].dtype
      return dtype == dtypes.string.as_datatype_enum
    return False

  def get_signature(self, signature_name=None):
    # Delegates signature lookup to the underlying client.
    return self._client.get_signature(signature_name)

  def preprocess(self, instances, stats=None, signature_name=None, **kwargs):
    """Canonicalize raw instances and columnarize them for Session.run."""
    _, signature = self.get_signature(signature_name)
    preprocessed = self._canonicalize_input(instances, signature)
    return self._get_columns(preprocessed, stats, signature)

  def _canonicalize_input(self, instances, signature):
    """Preprocess single-input instances to be dicts if they aren't already."""
    # The instances should be already (b64-) decoded here.
    if not self.is_single_input(signature):
      return instances

    tensor_name = list(signature.inputs.keys())[0]
    return canonicalize_single_tensor_input(instances, tensor_name)

  def postprocess(self, predicted_output, original_input=None, stats=None,
                  signature_name=None, **kwargs):
    """Performs the necessary transformations on the prediction results.

    The transformations include rowifying the predicted results, and also
    making sure that each input/output is a dict mapping input/output alias to
    the value for that input/output.

    Args:
      predicted_output: list of instances returned by the predict() method on
        preprocessed instances.
      original_input: List of instances, before any pre-processing was applied.
      stats: Stats object for recording timing information.
      signature_name: the signature name to find out the signature.
      **kwargs: Additional keyword arguments for postprocessing

    Returns:
      A list which is a dict mapping output alias to the output.
    """
    _, signature = self.get_signature(signature_name)
    with stats.time(prediction_utils.ROWIFY_TIME):
      # When returned element only contains one result (batch size == 1),
      # tensorflow's session.run() will return a scalar directly instead of a
      # a list. So we need to listify that scalar.
      # TODO(b/34686738): verify this behavior is correct.
      def listify(value):
        if not hasattr(value, "shape"):
          return np.asarray([value], dtype=np.object)
        elif not value.shape:
          # TODO(b/34686738): pretty sure this is a bug that only exists because
          # samples like iris have a bug where they use tf.squeeze which removes
          # the batch dimension. The samples should be fixed.
          return np.expand_dims(value, axis=0)
        else:
          return value

      postprocessed_outputs = {
          alias: listify(val)
          for alias, val in six.iteritems(predicted_output)
      }
      # rowify is a generator; materialize it before timing the encode step.
      postprocessed_outputs = rowify(postprocessed_outputs)
      postprocessed_outputs = list(postprocessed_outputs)

    with stats.time(prediction_utils.ENCODE_TIME):
      try:
        postprocessed_outputs = encode_base64(
            postprocessed_outputs, signature.outputs)
      except PredictionError as e:
        logging.exception("Encode base64 failed.")
        raise PredictionError(PredictionError.INVALID_OUTPUTS,
                              "Prediction failed during encoding instances: {0}"
                              .format(e.error_detail))
      except ValueError as e:
        logging.exception("Encode base64 failed.")
        raise PredictionError(PredictionError.INVALID_OUTPUTS,
                              "Prediction failed during encoding instances: {0}"
                              .format(e))
      except Exception as e:  # pylint: disable=broad-except
        logging.exception("Encode base64 failed.")
        raise PredictionError(PredictionError.INVALID_OUTPUTS,
                              "Prediction failed during encoding instances")
    return postprocessed_outputs

  @classmethod
  def from_client(cls, client, unused_model_path, **unused_kwargs):
    """Creates a TensorFlowModel from a SessionClient and model data files."""
    return cls(client)

  @property
  def signature_map(self):
    return self._client.signature_map
def create_tf_session_client(model_dir,
                             tags=(SERVING,),
                             config=None):
  """Create a SessionClient backed by the SavedModel found at *model_dir*."""
  session, signature_map = load_tf_model(model_dir, tags, config)
  return SessionClient(session, signature_map)
def encode_base64(instances, outputs_map):
    """Encodes binary data in a JSON-friendly way.

    String tensors whose alias ends in ``_bytes`` are base64-wrapped;
    other string tensors are converted to text (see _encode_str_tensor).

    Args:
        instances: List of prediction results; each element is a dict keyed
            by output alias, or a raw value when the model has exactly one
            output tensor.
        outputs_map: Mapping from output alias to tensor info (an object
            with a ``dtype`` attribute).

    Returns:
        The instances with string tensor data JSON-encoded.

    Raises:
        ValueError: If ``instances`` is not a list, or if a non-dict
            instance is seen while the model has multiple outputs.
    """
    if not isinstance(instances, list):
        raise ValueError("only lists allowed in output; got %s" %
                         (type(instances),))
    if not instances:
        return instances
    first_value = instances[0]
    if not isinstance(first_value, dict):
        if len(outputs_map) != 1:
            # BUG FIX: this previously *returned* the ValueError instead of
            # raising it, handing the exception object back as a "result".
            # The caller explicitly catches ValueError, so raising is the
            # intended behavior.
            raise ValueError("The first instance was a string, but there are "
                             "more than one output tensor, so dict expected.")
        # Only string tensors whose name ends in _bytes needs encoding.
        tensor_name, tensor_info = next(iter(outputs_map.items()))
        tensor_type = tensor_info.dtype
        if tensor_type == dtypes.string:
            instances = _encode_str_tensor(instances, tensor_name)
        return instances
    encoded_data = []
    for instance in instances:
        encoded_instance = {}
        for tensor_name, tensor_info in six.iteritems(outputs_map):
            tensor_type = tensor_info.dtype
            tensor_data = instance[tensor_name]
            if tensor_type == dtypes.string:
                tensor_data = _encode_str_tensor(tensor_data, tensor_name)
            encoded_instance[tensor_name] = tensor_data
        encoded_data.append(encoded_instance)
    return encoded_data
def _encode_str_tensor(data, tensor_name):
"""Encodes tensor data of type string.
Data is a bytes in python 3 and a string in python 2. Base 64 encode the data
if the tensorname ends in '_bytes', otherwise convert data to a string.
Args:
data: Data of the tensor, type bytes in python 3, string in python 2.
tensor_name: The corresponding name of the tensor.
Returns:
JSON-friendly encoded version of the data.
"""
if isinstance(data, list):
return [_encode_str_tensor(val, tensor_name) for val in data]
if tensor_name.endswith("_bytes"):
return {"b64": compat.as_text(base64.b64encode(data))}
else:
return compat.as_text(data)
def _load_tf_custom_op(model_path):
    """Loads a custom TF OP (in .so format) from /assets.extra directory.

    Scans the model's assets.extra directory for shared objects matching
    _CUSTOM_OP_SUFFIX. Files on GCS ("gs://") are first copied to a local
    directory because tf.load_op_library needs a local path. Load failures
    are logged but deliberately not re-raised (best effort).
    """
    assets_dir = os.path.join(model_path, _CUSTOM_OP_DIRECTORY_NAME)
    if tf.gfile.IsDirectory(assets_dir):
        custom_ops_pattern = os.path.join(assets_dir, _CUSTOM_OP_SUFFIX)
        for custom_op_path_original in tf.gfile.Glob(custom_ops_pattern):
            logging.info("Found custom op file: %s", custom_op_path_original)
            if custom_op_path_original.startswith("gs://"):
                # Stage the GCS file locally before loading.
                if not os.path.isdir(_CUSTOM_OP_LOCAL_DIR):
                    os.makedirs(_CUSTOM_OP_LOCAL_DIR)
                custom_op_path_local = os.path.join(
                    _CUSTOM_OP_LOCAL_DIR, os.path.basename(custom_op_path_original))
                logging.info("Copying custop op from: %s to: %s",
                             custom_op_path_original, custom_op_path_local)
                # Third argument True: overwrite an existing local copy.
                tf.gfile.Copy(custom_op_path_original, custom_op_path_local, True)
            else:
                custom_op_path_local = custom_op_path_original
            try:
                logging.info("Loading custom op: %s", custom_op_path_local)
                logging.info("TF Version: %s", tf.__version__)
                tf.load_op_library(custom_op_path_local)
            except RuntimeError as e:
                # Swallow the error: prediction continues but may later fail
                # on missing ops, as the message says.
                logging.exception(
                    "Failed to load custom op: %s with error: %s. Prediction "
                    "will likely fail due to missing operations.", custom_op_path_local,
                    e)
3203087 | <gh_stars>0
"""Generate overview figures of all added colormaps.
This script is taken from the matplotlib documentation
https://matplotlib.org/tutorials/colors/colormaps.html
"""
import matplotlib.pyplot as plt
import numpy as np
import prettypyplot as pplt
# run setup: apply prettypyplot's style and raise figure DPI for export
pplt.use_style()
plt.rcParams['figure.dpi'] = 600
# Colormap names grouped by category, rendered one figure per category.
# NOTE(review): 'jet' is listed under "Perceptually Uniform Sequential"
# although it is a classic rainbow map — presumably shown for contrast;
# confirm the intended grouping.
cmaps = {}
cmaps['Perceptually Uniform Sequential'] = [
    'macaw', 'viridis', 'bownair', 'turbo', 'jet',
]
cmaps['Qualitative'] = [
    'pastel5', 'pastel6', 'pastel_autunm', 'pastel_spring', 'pastel_rainbow',
    'summertimes', 'cbf4', 'cbf5', 'cbf8', 'ufcd', 'paula', 'tol:bright',
    'tol:muted', 'tol:high_contrast', 'tol:medium_contrast', 'tol:vibrant',
]
# Two identical rows of a 0..1 ramp: imshow renders this as a color bar.
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
def plot_color_gradients(cmap_category, cmap_list):
    """Generate colormap plot.

    Renders one horizontal gradient strip per colormap name in
    *cmap_list* (using the module-level ``gradient`` array) and labels
    each strip on the left. Returns the created figure.
    """
    fig, axes = plt.subplots(nrows=len(cmap_list), figsize=(3.2, 0.15))
    axes[0].set_title(cmap_category + ' colormaps')
    # for similar absolute width of figures: an invisible (white) dot
    # anchors the left edge so all category figures line up
    fig.text(0, .5, r'.', c='w')
    for ax, name in zip(axes, cmap_list):
        ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))
        pos = list(ax.get_position().bounds)
        x_text = pos[0] - 0.01
        y_text = pos[1] + pos[3] / 2
        # Escape underscores for the LaTeX-rendered label text.
        name = name.replace('_', r'\textunderscore{}')
        fig.text(x_text, y_text, name, va='center', ha='right')
    # Turn off *all* ticks & spines, not just the ones with colormaps.
    for ax in axes:
        ax.set_axis_off()
    return fig
# Render and save one gallery image per colormap category.
for cmap_category, cmap_list in cmaps.items():
    fig = plot_color_gradients(cmap_category, cmap_list)
    pplt.savefig(f'gallery/cmaps/{cmap_category}.png')
| StarcoderdataPython |
3281556 | <reponame>loopinf/vertex-ai-samples
# -*- coding: utf-8 -*-
import sys
import os
from pandas.io import pickle
# import pandas as pd
# Vertex AI / Kubeflow Pipelines configuration for this run.
PROJECT_ID = "dots-stock"  # @param {type:"string"}
REGION = "us-central1"  # @param {type:"string"}
USER = "shkim01"  # <---CHANGE THIS
BUCKET_NAME = "gs://pipeline-dots-stock"  # @param {type:"string"}
PIPELINE_ROOT = f"{BUCKET_NAME}/pipeline_root/{USER}"
from typing import NamedTuple
from kfp import dsl
from kfp.v2 import compiler
import kfp.components as comp
from kfp.v2.dsl import (Artifact,
                        Dataset,
                        Input,
                        Model,
                        Output,
                        Metrics,
                        ClassificationMetrics,
                        component)
from kfp.v2.google.client import AIPlatformClient
from test import test
# Wrap the local `test` function as a pipeline component running in a
# custom container image.
print_op = comp.create_component_from_func(
    test,
    base_image="gcr.io/dots-stock/python-img-v5.2",
)
#########################################
# create pipeline #######################
#########################################
# Compiled pipeline spec is written to this file and then submitted.
job_file_name = 'test.json'
@dsl.pipeline(
    name=job_file_name.split('.json')[0],
    pipeline_root=PIPELINE_ROOT
)
def create_awesome_pipeline():
    """Single-step pipeline that just runs the `print_op` component."""
    op = print_op()
# Compile the pipeline definition into a job spec file...
compiler.Compiler().compile(
    pipeline_func=create_awesome_pipeline,
    package_path=job_file_name
)
# ...then submit it to Vertex AI Pipelines for execution.
api_client = AIPlatformClient(
    project_id=PROJECT_ID,
    region=REGION,
)
response = api_client.create_run_from_job_spec(
    job_spec_path=job_file_name,
    enable_caching=True,
    pipeline_root=PIPELINE_ROOT
)
######################
# @component(
# base_image="amancevice/pandas:1.3.2-slim"
# )
# def get_univ_for_price(
# # date_ref: str,
# base_item_dataset: Input[Dataset],
# bros_dataset: Input[Dataset],
# univ_dataset: Output[Dataset],
# ):
# import pandas as pd
# import logging
# import json
# logger = logging.getLogger(__name__)
# FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
# logging.basicConfig(format=FORMAT)
# logger.setLevel(logging.DEBUG)
# # base item
# df_top30s = pd.read_csv(base_item_dataset.path,
# index_col=0,
# dtype={'날짜': str}).reset_index(drop=True)
# # load edge_list to make bros
# df_ed = pd.read_csv(bros_dataset.path, index_col=0).reset_index(drop=True)
# df_ed_r = df_ed.copy()
# df_ed_r.rename(columns={'target':'source', 'source':'target'}, inplace=True)
# df_ed2 = df_ed.append(df_ed_r, ignore_index=True)
# df_ed2['date'] = pd.to_datetime(df_ed2.date).dt.strftime('%Y%m%d')
# dic_univ = {}
# for date, df in df_top30s.groupby('날짜'):
# logger.debug(f'date: {date}')
# l_top30 = df.종목코드.to_list()
# l_bro = df_ed2[(df_ed2.date == date) &
# (df_ed2.source.isin(l_top30))].target.unique().tolist()
# dic_univ[date] = list(set(l_top30 + l_bro ))
# with open(univ_dataset.path, 'w', encoding='utf8') as f:
# json.dump(dic_univ, f)
| StarcoderdataPython |
3265846 | <reponame>qizhenkang/myLeetCode
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 31 11:08:34 2021
@author: qizhe
"""
# import string
class Solution:
    def possiblyEquals(self, s1: str, s2: str) -> bool:
        """Return True if s1 and s2 can describe the same original string.

        Each string may abbreviate runs of characters as decimal numbers
        (e.g. "i18n" for "internationalization"), and a digit run may be
        split into several numbers ("123" can mean 123, 1+23, 12+3, 1+2+3,
        ...). LeetCode 2060.

        BUG FIX: the previous implementation ignored every recursive
        return value and returned a list instead of a bool. Replaced with
        the standard memoized DFS over (i, j, diff).
        """
        memo = {}

        def dfs(i: int, j: int, diff: int) -> bool:
            # diff > 0: s1 is `diff` wildcard characters ahead, which s2
            # must still cover; diff < 0 is the symmetric case.
            if i == len(s1) and j == len(s2):
                return diff == 0
            key = (i, j, diff)
            if key in memo:
                return memo[key]
            result = False
            if i < len(s1) and s1[i].isdigit():
                # Try every way of reading a number prefix from s1.
                val = 0
                k = i
                while k < len(s1) and s1[k].isdigit() and not result:
                    val = val * 10 + int(s1[k])
                    k += 1
                    result = dfs(k, j, diff + val)
            elif j < len(s2) and s2[j].isdigit():
                val = 0
                k = j
                while k < len(s2) and s2[k].isdigit() and not result:
                    val = val * 10 + int(s2[k])
                    k += 1
                    result = dfs(i, k, diff - val)
            elif diff == 0:
                # No pending wildcards: next literal characters must agree.
                if i < len(s1) and j < len(s2) and s1[i] == s2[j]:
                    result = dfs(i + 1, j + 1, 0)
            elif diff > 0:
                # s2's next literal character consumes one pending wildcard.
                if j < len(s2):
                    result = dfs(i, j + 1, diff - 1)
            else:
                # diff < 0: s1's next literal character consumes one.
                if i < len(s1):
                    result = dfs(i + 1, j, diff + 1)
            memo[key] = result
            return result

        return dfs(0, 0, 0)
# Manual smoke test for Solution.possiblyEquals.
if __name__ == '__main__':
    solu = Solution()
    # Leftover fixtures from other exercises, kept commented for reference:
    # distance = [2,1,1,2]
    # distance = [1,2,3,4]
    # nums = ["Hello","Alaska","Dad","Peace"]
    # NOTE(review): nums/start/goal are unused by this exercise.
    nums = [1,3]
    start = 6
    goal = 4
    s1 = "internationalization"
    s2 = "i18n"
    result = solu.possiblyEquals(s1,s2)
    # output_Str = 'result = ' + solu.intToRoman(input_int)
    output_Str = ' result = ' + str(result)
    print(output_Str)
3309047 | from django.forms import ModelForm
from core.models import Post, Comment
class PostForm(ModelForm):
    """ModelForm exposing only the `message` field of Post."""
    class Meta:
        model = Post
        fields = ('message',)
class CommentForm(ModelForm):
    """ModelForm exposing only the `new_comment` field of Comment."""
    class Meta:
        model = Comment
        fields = ('new_comment',)
| StarcoderdataPython |
20598 | #INVASION COMMANDS:
# !invasions // !atinvasions <reward> // !rminvasions
import discord
from discord.ext import commands
import asyncio
from src import sess
class Invasions(commands.Cog):
    """Discord cog for Warframe invasions: listing and reward alerts."""

    def __init__(self, bot):
        self.bot = bot
        # user -> [tracked reward string, list of previously seen invasions
        # matching that reward]
        self.alert_dict = {}

    @commands.Cog.listener()
    async def on_ready(self):
        print('Invasions Online')
        # Periodically check; runs forever once the bot is ready.
        while True:
            await asyncio.gather(self.check_invasions(50))

    @commands.command()
    async def invasions(self, ctx):
        """!invasions — post an embed of all currently active invasions."""
        inv = await sess.request('invasions')
        if inv == 0:
            # sess.request signals failure with 0 — presumably; confirm.
            print("Could not retrieve data.")
            return
        embed = discord.Embed(title="Invasions")
        # Organize invasions into description/type
        inv_dict = {}  # example: {GrineerOffensive: [{mission}, {mission}], }
        for i in inv:
            if not i['completed']:  # Do not add invasions that have been completed
                if i['desc'] in inv_dict:
                    inv_dict[i['desc']].append(i)
                else:
                    inv_dict[i['desc']] = []
                    inv_dict[i['desc']].append(i)
        # Show invasion information grouped via description/type
        for key, li in inv_dict.items():
            info = ''
            for v in li:
                node = v['node']
                atk_reward = v['attackerReward']['asString'] or 'N/A'
                def_reward = v['defenderReward']['asString'] or 'N/A'
                attackers = v['attackingFaction']
                defenders = v['defendingFaction']
                info += node + ': \n' + attackers + f' [{atk_reward}]' + ' vs ' + defenders + f' [{def_reward}]\n'
            embed.add_field(name=f'{key}', value=f'{info}', inline=False)
        await ctx.send(embed=embed)

    # Add user of command to the alert_dict to be alerted of invasions with specific reward
    @commands.command()
    async def atinvasions(self, ctx, *, reward=''):
        """!atinvasions <reward> — DM the user when matching invasions change."""
        try:
            if not reward:
                await ctx.send(ctx.message.author.mention + ' Enter an invasion reward to be alerted for.')
            else:
                # Fresh registration resets the previously-seen list.
                self.alert_dict[ctx.message.author] = [reward, []]
                await ctx.message.author.send(
                    f' You will now be alerted for invasions with a {reward.title()} reward.'
                    ' To stop being alerted, use command "!rminvasions"')
        except ValueError:
            await ctx.message.author.send('Enter an invasion reward to be alerted for.')

    # Remove user of command from the alert_dict to no longer be notified of invasion rewards
    @commands.command()
    async def rminvasions(self, ctx):
        """!rminvasions — stop reward alerts for the invoking user."""
        try:
            self.alert_dict.pop(ctx.message.author)
            await ctx.message.author.send('You are no longer being alerted for invasions.')
        except KeyError:
            await ctx.message.author.send('You are currently not being alerted.')

    # THIS WILL BE PERIODICALLY CALLED on_ready
    # Check for invasions with specific rewards for each user
    async def check_invasions(self, delay):
        """Sleep *delay* seconds, then DM each registered user whose
        tracked-reward invasion list changed since the last check."""
        # Wait before making request
        await asyncio.sleep(delay)
        inv = await sess.request('invasions')
        if inv == 0:
            print("Could not retrieve data.")
            return
        embed = discord.Embed(title="Invasions")
        # Organize invasions into description/type
        inv_dict = {}  # example: {GrineerOffensive: [{mission}, {mission}], }
        for i in inv:
            if not i['completed']:  # Do not add invasions that have been completed
                if i['desc'] in inv_dict:
                    inv_dict[i['desc']].append(i)
                else:
                    inv_dict[i['desc']] = []
                    inv_dict[i['desc']].append(i)
        # Check each user's tracked reward and notify of any missions with their specific reward
        # NOTE(review): alert_dict may be mutated by commands while this
        # coroutine awaits — iterating it here can raise; confirm.
        for user in self.alert_dict.keys():
            embed.clear_fields()
            user_inv = []
            for key, li in inv_dict.items():
                info = ''
                for v in li:
                    if self.alert_dict[user][0].lower() in v['attackerReward']['asString'].lower() \
                            or self.alert_dict[user][0].lower() in v['defenderReward']['asString'].lower():
                        user_inv.append(v)
                        node = v['node']
                        atk_reward = v['attackerReward']['asString'] or 'N/A'
                        def_reward = v['defenderReward']['asString'] or 'N/A'
                        attackers = v['attackingFaction']
                        defenders = v['defendingFaction']
                        info += node + ': \n' + attackers + f' [{atk_reward}]' + ' vs ' + defenders + f' [{def_reward}]\n'
                if info != '':
                    embed.add_field(name=f'{key}', value=f'{info}', inline=False)
            # Check if need to notify user
            if len(self.alert_dict[user][1]) != len(user_inv):  # If lengths do not match, alert of update
                self.alert_dict[user][1] = user_inv.copy()
                await user.send(f'Invasions with {self.alert_dict[user][0].title()} reward has been updated!',
                                embed=embed)
            else:
                # Same length: compare node-by-node for replacements.
                for i in range(len(self.alert_dict[user][1])):
                    if self.alert_dict[user][1][i]['node'] != user_inv[i]['node']:
                        self.alert_dict[user][1] = user_inv.copy()
                        await user.send(f'Invasions with {self.alert_dict[user][0].title()} reward has been updated!',
                                        embed=embed)
def setup(bot):
    """Extension entry point used by discord.py to load this cog."""
    cog = Invasions(bot)
    bot.add_cog(cog)
| StarcoderdataPython |
3245103 | import sys
from typing import List, Tuple
import re
import numpy as np
from abnumber.exceptions import ChainParseError
try:
    from anarci.anarci import anarci
except ImportError:
    # Print a human-readable hint and then terminate: ANARCI is a hard
    # runtime dependency that cannot be installed from PyPI.
    print('ANARCI module not available. Please install it separately or install AbNumber through Bioconda')
    print('See: https://abnumber.readthedocs.io/')
    sys.exit(1)

# Antibody position token, e.g. "H112A": optional chain, number, optional
# insertion letter.
POS_REGEX = re.compile(r'([HL]?)(\d+)([A-Z]?)')
WHITESPACE = re.compile(r'\s+')
def _validate_chain_type(chain_type):
assert chain_type in ['H', 'L', 'K'], \
f'Invalid chain type "{chain_type}", it should be "H" (heavy), "L" (lambda light chian) or "K" (kappa light chain)'
def _anarci_align(sequence, scheme, allowed_species, assign_germline=False) -> List[Tuple]:
    """Number *sequence* with ANARCI under the given *scheme*.

    Returns a list of tuples, one per recognized domain:
    (aa_dict mapping Position -> residue, chain_type, tail sequence after
    the domain, species, v_gene, j_gene). v_gene/j_gene are None unless
    *assign_germline* is set.

    Raises:
        ChainParseError: If ANARCI does not recognize a variable chain.
        NotImplementedError: For IMGT position 61A (known ANARCI bug).
    """
    from abnumber.position import Position
    # Strip all whitespace so pasted sequences work.
    sequence = re.sub(WHITESPACE, '', sequence)
    all_numbered, all_ali, all_hits = anarci(
        [('id', sequence)],
        scheme=scheme,
        allowed_species=allowed_species,
        assign_germline=assign_germline
    )
    # Only a single sequence was submitted, so take the first result.
    seq_numbered = all_numbered[0]
    seq_ali = all_ali[0]
    if seq_numbered is None:
        raise ChainParseError(f'Variable chain sequence not recognized: "{sequence}"')
    assert len(seq_numbered) == len(seq_ali), 'Unexpected ANARCI output'
    results = []
    for (positions, start, end), ali in zip(seq_numbered, seq_ali):
        chain_type = ali['chain_type']
        species = ali['species']
        v_gene = ali['germlines']['v_gene'][0][1] if assign_germline else None
        j_gene = ali['germlines']['j_gene'][0][1] if assign_germline else None
        if scheme == 'imgt':
            # Guard against a known upstream numbering bug at IMGT 61A.
            for (num, letter), aa in positions:
                if str(num) == '61' and str(letter) == 'A':
                    raise NotImplementedError(f'Cannot parse sequence "{sequence}", '
                                              f'ANARCI numbering of IMGT position 61A is currently broken: '
                                              f'https://github.com/oxpig/ANARCI/issues/14')
        # Gap characters ('-') are dropped from the position map.
        aa_dict = {Position(chain_type=chain_type, number=num, letter=letter, scheme=scheme): aa
                   for (num, letter), aa in positions if aa != '-'}
        tail = sequence[end+1:]
        results.append((aa_dict, chain_type, tail, species, v_gene, j_gene))
    return results
def _get_unique_chains(chains):
seqs = set()
chains_filtered = []
for chain in chains:
if chain.seq in seqs:
continue
seqs.add(chain.seq)
chains_filtered.append(chain)
return chains_filtered
# Based on positive score in Blosum62.
# Set of two-character residue pairs "AB" considered similar; membership
# is checked by string concatenation in is_similar_residue below.
SIMILAR_PAIRS = {'AA', 'AS', 'CC', 'DD', 'DE', 'DN', 'ED', 'EE', 'EK', 'EQ', 'FF', 'FW', 'FY', 'GG', 'HH', 'HN', 'HY',
                 'II', 'IL', 'IM', 'IV', 'KE', 'KK', 'KQ', 'KR', 'LI', 'LL', 'LM', 'LV', 'MI', 'ML', 'MM', 'MV', 'ND',
                 'NH', 'NN', 'NS', 'PP', 'QE', 'QK', 'QQ', 'QR', 'RK', 'RQ', 'RR', 'SA', 'SN', 'SS', 'ST', 'TS', 'TT',
                 'VI', 'VL', 'VM', 'VV', 'WF', 'WW', 'WY', 'YF', 'YH', 'YW', 'YY'}
def is_similar_residue(a, b):
    """Return True if residues *a* and *b* are similar (positive BLOSUM62 score).

    Gap characters ('-') are only considered similar to other gaps.
    """
    if '-' in (a, b):
        return a == b
    return (a + b) in SIMILAR_PAIRS
def is_integer(object):
    """Return True for built-in ints and NumPy integer scalars."""
    return isinstance(object, (int, np.integer))
# Numbering schemes and CDR definitions AbNumber understands.
SUPPORTED_SCHEMES = ['imgt', 'aho', 'chothia', 'kabat']
SUPPORTED_CDR_DEFINITIONS = ['imgt', 'chothia', 'kabat', 'north']
SCHEME_BORDERS = {
    # Start coordinates
    # CDR1, FR2, CDR2, FR3, CDR3, FR4 (last entry is one past FR4's start run)
    'imgt': [27, 39, 56, 66, 105, 118, 129],
    'kabat_H': [31, 36, 50, 66, 95, 103, 114],
    'kabat_K': [24, 35, 50, 57, 89, 98, 108],
    'kabat_L': [24, 35, 50, 57, 89, 98, 108],
    'chothia_H': [26, 33, 52, 57, 95, 103, 114],
    'chothia_K': [24, 35, 50, 57, 89, 98, 108],
    'chothia_L': [24, 35, 50, 57, 89, 98, 108],
    'north_H': [23, 36, 50, 59, 93, 103, 114],
    'north_K': [24, 35, 49, 57, 89, 98, 108],
    'north_L': [24, 35, 49, 57, 89, 98, 108],
}
# { scheme -> { region -> list of position numbers } }
SCHEME_REGIONS = {
    scheme: {
        'FR1': list(range(1, borders[0])),
        'CDR1': list(range(borders[0], borders[1])),
        'FR2': list(range(borders[1], borders[2])),
        'CDR2': list(range(borders[2], borders[3])),
        'FR3': list(range(borders[3], borders[4])),
        'CDR3': list(range(borders[4], borders[5])),
        'FR4': list(range(borders[5], borders[6])),
    } for scheme, borders in SCHEME_BORDERS.items()
}
# { scheme -> { position number -> region } } (inverse of SCHEME_REGIONS)
SCHEME_POSITION_TO_REGION = {
    scheme: {pos_num: region for region, positions in regions.items() for pos_num in positions} \
    for scheme, regions in SCHEME_REGIONS.items()
}
# { scheme -> set of vernier position numbers }
# Only Kabat variants are currently enabled; other schemes kept commented.
SCHEME_VERNIER = {
    # 'imgt_H': frozenset([2, 52, 53, 54, 76, 78, 80, 82, 87, 118]),
    # 'chothia_H': frozenset([2, 47, 48, 49, 67, 69, 71, 73, 78, 93, 94, 103]),
    # 'north_H': frozenset([2, 47, 48, 49, 67, 69, 71, 73, 78, 93, 94, 103]),
    'kabat_H': frozenset([2, 27, 28, 29, 30, 47, 48, 49, 67, 69, 71, 73, 78, 93, 94, 103]),
    # 'imgt_K': frozenset([2, 4, 41, 42, 52, 53, 54, 55, 78, 80, 84, 85, 87, 118]),
    # 'imgt_L': frozenset([2, 4, 41, 42, 52, 53, 54, 55, 78, 80, 84, 85, 87, 118]),
    # 'chothia_K': frozenset([2, 4, 35, 36, 46, 47, 48, 49, 64, 66, 68, 69, 71, 98]),
    # 'chothia_L': frozenset([2, 4, 35, 36, 46, 47, 48, 49, 64, 66, 68, 69, 71, 98]),
    # 'north_K': frozenset([2, 4, 35, 36, 46, 47, 48, 49, 64, 66, 68, 69, 71, 98]),
    # 'north_L': frozenset([2, 4, 35, 36, 46, 47, 48, 49, 64, 66, 68, 69, 71, 98]),
    'kabat_K': frozenset([2, 4, 35, 36, 46, 47, 48, 49, 64, 66, 68, 69, 71, 98]),
    'kabat_L': frozenset([2, 4, 35, 36, 46, 47, 48, 49, 64, 66, 68, 69, 71, 98]),
}
#'kabat_H': 31-35, 50-65, 95-102
#'kabat_K': 24-34, 50-56, 89-97
| StarcoderdataPython |
89638 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from mo.front.extractor import FrontExtractorOp
from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from mo.ops.squeeze import Squeeze
class SqueezeExtractor(FrontExtractorOp):
    """Model Optimizer front extractor for the MXNet 'squeeze' op."""
    op = 'squeeze'
    enabled = True

    @classmethod
    def extract(cls, node):
        """Read the MXNet 'axis' attribute and update the node as a Squeeze op."""
        attrs = get_mxnet_layer_attrs(node.symbol_dict)
        # axis may be absent (None): squeeze all size-1 dims in that case.
        Squeeze.update_node_stat(node, {'squeeze_dims': attrs.int("axis", None), 'keep_at_least_1d': True})
        return cls.enabled
| StarcoderdataPython |
36591 | from flask import Flask, render_template, request, redirect, url_for, Markup, \
flash # Imports Flask and all required modules
import databasemanager # Provides the functionality to load stuff from the database
app = Flask(__name__)
import errormanager  # Enum for types of errors
# Module-level handles used throughout the route handlers below.
datamanager = databasemanager
errorman = errormanager
# Tracks who is signed in. NOTE(review): this is an annotation only —
# no value is assigned until Login() runs, so reading CurrentUser before
# a successful login raises NameError.
CurrentUser: str
# Route function for homepage.
# @return Returns render template of base.html
@app.route('/')
def Home():
    """Public homepage: reload content from the database and render it."""
    datamanager.LoadContent()
    return render_template('base.html', entries=datamanager.entries, bFailure=False, app=datamanager)
# Checks the username and the password and handles any errors
# @route Homepage
# @method: POST
# @return redirect: Redirect to 'AdminHome' function after successful login
# @return render_template: base.html with failure condition
@app.route('/', methods=['POST'])
def Login():
    """Handle the login form POSTed from the homepage.

    On success, records the user in the module-level CurrentUser and
    redirects to the admin area; on any failure, re-renders the homepage
    with an error code from errormanager.
    """
    if request.method == "POST":
        try:
            password = request.form['Password']
            username = request.form['Username']
            if (password != '') and (username != ''):
                if datamanager.CheckUser(username, password) == True:
                    global CurrentUser
                    CurrentUser = username
                    return redirect(url_for('AdminHome', auth=str(datamanager.Encrypt('True')), user=username))
                else:
                    Failure = errorman.EErrorType.FailedPassword
                    return render_template('base.html', fail=Failure, failenum=errorman.EErrorType,
                                           entries=datamanager.entries, bFailure=True, app=datamanager)
            else:
                Failure = errorman.EErrorType.FailedNone
                # BUG FIX: the kwarg was misspelled 'entires', so the
                # template never received the entries list on this path.
                return render_template('base.html', fail=Failure, failenum=errorman.EErrorType, bFailure=True,
                                       entries=datamanager.entries, app=datamanager)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; missing form fields land here.
            return render_template('base.html', fail=errorman.EErrorType.FailedNone, failenum=errorman.EErrorType,
                                   bFailure=True, entries=datamanager.entries, app=datamanager)
# Main route for admin homepage
# Checks for encrypted string to ensure access was granted
# @route: '/adminbase' <auth: encrypted string> <user: user's username>
# @param auth: Encrypted string used for security
# @param user: Username of user
# @return render_template: adminbase.html with entries, the username and the datamanager
# @return redirect: 'Home' will return the user to home if they don't have valid acsses
@app.route('/adminbase/<auth> <user>')
def AdminHome(auth, user):
    """Admin landing page, gated on the encrypted 'True' token in the URL."""
    if auth == str(datamanager.Encrypt('True')):
        datamanager.LoadContent()
        # NOTE(review): debug print left in; consider removing/logging.
        print(datamanager.entries)
        return render_template('adminbase.html', entries=datamanager.entries, user=user, app=datamanager)
    else:
        return redirect(url_for('Home'))
# Gets the users inputted values for a new entry and adds them to the website
# @route: '/adminbase.html' <user: username of signed in user>
# @param user: username of the signed in user
# @return redirect: 'Admin Home' function with encryption string and username
@app.route('/adminbase.html/<user>', methods=["POST"])
def CreateNew(user: str):
    """Create a new content entry from the admin form and return to admin home."""
    if request.method == "POST":
        # try:
        title = request.form['Title']
        desc = request.form['Desc']
        image = request.form['Image']
        caption = request.form['Caption']
        # New entry id = current number of entries; `ind` is its string form.
        # NOTE(review): `id` shadows the builtin of the same name (local only).
        id = len(datamanager.entries)
        ind = str(id)
        datamanager.AddNewItem(title, desc, caption, image, id, ind, 0)
        return redirect(url_for('AdminHome', auth=str(datamanager.Encrypt('True')), user=user))
        # except:
        #     return render_template('error.html', fail=errorman.EErrorType.FailedNone, failenum=errorman.EErrorType)
# Deprecated
#@<EMAIL>('/adminbase', methods=["POST"])
#def Delete():
#if request.method == "POST":
# delete = request.form['Del']
# if delete == True:
# datamanager.RemoveItem(0)
# return render_template(url_for('AdminHome', auth=str(datamanager.Encrypt('True'))))
#else:
# return render_template(url_for('AdminHome', auth=str(datamanager.Encrypt('True'))))
# Main route for signup page
# @route: '/signup'
# @return render_template: signup.html
@app.route('/signup')
def SignUp():
    """Render the account sign-up page."""
    page = render_template('signup.html')
    return page
# Gets the entry input values and adds to database also handles errors
# @route '/sign' methods: GET and POST
# @return redirect: 'Home'
# @return render_template: 'error.html' with error type
@app.route('/sign', methods=["POST", "GET"])
def AddNewUser():
    """Create a new account from the sign-up form.

    Requires a valid admin key, non-empty fields and matching passwords;
    on success redirects to the homepage, otherwise renders error.html
    with an error code from errormanager.
    """
    try:
        if request.method == "POST":
            AdminKey = request.form['Key']
            Password = request.form['Password']
            Username = request.form['Username']
            # TODO(review): form field name was lost to anonymization
            # ('<PASSWORD>'); 'ConfirmPass' matches the variable — confirm
            # against signup.html.
            ConfirmPass = request.form['ConfirmPass']
            if datamanager.CheckKey(AdminKey) == True:
                if ((Password != '') and (Username != '') and (ConfirmPass != '')):
                    if ConfirmPass == Password:
                        if datamanager.NewUser(Username, Password) == True:
                            return redirect(url_for('Home'))
                        else:
                            return render_template('error.html', fail=errorman.EErrorType.FailedPassword,
                                                   failenum=errorman.EErrorType)
                    else:
                        return render_template('error.html', fail=errorman.EErrorType.FailedNone,
                                               failenum=errorman.EErrorType)
        # CONSISTENCY FIX: this fall-through previously rendered error.html
        # without fail/failenum, unlike every other error path.
        return render_template('error.html', fail=errorman.EErrorType.FailedNone, failenum=errorman.EErrorType)
    except Exception:
        # Narrowed from a bare `except:`; missing form fields land here.
        return render_template('error.html', fail=errorman.EErrorType.FailedNone, failenum=errorman.EErrorType)
# Deprecated
@app.route('/likes/<id>')
def Like(id: int):
    """Increment the like counter for entry *id* and return home."""
    datamanager.AddLike(id)
    return redirect(url_for('Home'))
# Deprecated
@app.route('/deleteconfirm', methods=['GET'])
def ChangeDeleteTarget():
    """Remember which entry the admin wants to delete (module global).

    NOTE(review): the route only allows GET but reads request.form, which
    carries POST data — this likely always raises; confirm intended method.
    """
    id = request.form['Delete']
    global deletetarget
    deletetarget = id
    print(deletetarget)
    globals()  # NOTE(review): no-op call, has no effect
    return 'hi'  # This exists because Flask is bad
# Deprecated
@app.route('/delete')
def Delete():
    """Delete the previously selected entry and return to the admin page.

    NOTE(review): reads datamanager.deletetarget, but ChangeDeleteTarget
    stores the target in this module's `deletetarget` global — the two
    never meet; confirm which attribute is intended.
    """
    datamanager.RemoveItem(datamanager.deletetarget)
    global CurrentUser
    CurrentUser = 'user'
    return redirect(url_for('AdminHome', auth=str(datamanager.Encrypt('True')), user=CurrentUser, app=datamanager))
# Main Flask Loop
if __name__ == '__main__':
    # NOTE(review): secret key is derived deterministically from a constant,
    # so sessions are forgeable by anyone who knows Encrypt — confirm.
    app.secret_key = datamanager.Encrypt('key')
    app.run()
| StarcoderdataPython |
def register(app):
    """Register every channel handler of this package on *app*."""
    from . import new_channel_command, incoming_channel
    for handler_module in (new_channel_command, incoming_channel):
        handler_module.register(app)
3230860 | # coding=utf-8
import base64
import os
import lib.settings
modulname = "Core"
import socket
import sys
import subprocess
sys.path.insert(0, "lib/")
sys.path.insert(1, "lib/settings/")
import RPi.GPIO as GPIO
from flask import Flask
from flask import render_template
from flask_fontawesome import FontAwesome
from lib.pins import v_LED1_Pin, v_LED2_Pin, v_PUMP1_Pin, v_PUMP2_Pin
from lib.functions import console, uibootscreen, \
SensorData, processing, statusscreen, gpiosetup, komponententester, getdbdata, get_latest_image, getallimages, \
get_base64_encoded_image, getsensordata
import websettings
import version
# GPIO pin -> display name and cached on/off state for the grow lights.
lights = {
    v_LED1_Pin: {'name': 'LED Links', 'state': GPIO.LOW},
    v_LED2_Pin: {'name': 'LED Rechts', 'state': GPIO.LOW}
}
# Same mapping for the two irrigation pumps.
pumps = {
    v_PUMP1_Pin: {'name': 'Pumpe Links', 'state': GPIO.LOW},
    v_PUMP2_Pin: {'name': 'Pumpe Rechts', 'state': GPIO.LOW}
}
# Boot sequence: banner, GPIO setup, component self-test, display init.
console(modulname, "System startet")
console(modulname, version.v_Application + version.v_Version)
console(modulname, version.v_Description)
console(modulname,"GPIO - Einrichtung")
gpiosetup()
komponententester()
console(modulname,"Display und Sensoren werden Initzalisiert !")
uibootscreen()
app = Flask(__name__, template_folder='dashboard')
fa = FontAwesome(app)
# Initial sensor read cycle before the web server starts.
console(modulname, "--Begin des Zyklus--")
humidity, temperature, sensor_humitidy, sensor_temp = getsensordata()
console(modulname, "--Ende des Zyklus--")
# GPIO control of the grow lights - start
@app.route("/LIGHT/<changeLightPin>/<actionLight>/<returnto>")
def lightaction(changeLightPin, actionLight, returnto):
    """Switch one grow-light pin on/off and re-render the page in *returnto*."""
    # Convert the pin from the URL into an integer:
    changeLightPin = int(changeLightPin)
    # Get the device name for the pin being changed:
    deviceName = lights[changeLightPin]['name']
    # If the action part of the URL is "on," execute the code indented below:
    if actionLight == "on":
        # Set the pin high:
        GPIO.output(changeLightPin, GPIO.HIGH)
        # Save the status message to be passed into the template:
        # NOTE(review): `message` is never passed to the template below.
        message = "Schalte " + deviceName + " ein."
    if actionLight == "off":
        GPIO.output(changeLightPin, GPIO.LOW)
        message = "Schalte " + deviceName + " aus."
    # Refresh the cached on/off state of all lights and pumps.
    for lpin in lights:
        lights[lpin]['state'] = GPIO.input(lpin)
    for ppin in pumps:
        pumps[ppin]['state'] = GPIO.input(ppin)
    Template_Pumps = {'pumps': pumps}
    Template_Lights = {'lights': lights}
    timelaps_folder_picture = lib.settings.timelaps_folder_picture
    Lastpic = get_latest_image(timelaps_folder_picture)
    appversion: object = version.v_Version
    appname = version.v_Application
    Template_Sensor = SensorData("web")
    Template_Sensordaten = getdbdata()
    return render_template(returnto + '.html', title='Welcome', v_Application=appname, v_Version=appversion,
                           **Template_Pumps, **Template_Lights, **Template_Sensor, **Template_Sensordaten, lastpic=Lastpic)
# GPIO control of the irrigation pumps - start
@app.route("/Pump/<changePumpPin>/<actionPUMP>/<returnto>")
def pumpaction(changePumpPin, actionPUMP, returnto):
    """Switch one pump pin on/off and re-render the page in *returnto*."""
    # Convert the pin from the URL into an integer:
    changePumpPin = int(changePumpPin)
    # Get the device name for the pin being changed:
    deviceName = pumps[changePumpPin]['name']
    # If the action part of the URL is "on," execute the code indented below:
    if actionPUMP == "on":
        # Set the pin high:
        GPIO.output(changePumpPin, GPIO.HIGH)
        # Save the status message to be passed into the template:
        # NOTE(review): `message` is never passed to the template below.
        message = "Schalte " + deviceName + " ein."
    if actionPUMP == "off":
        GPIO.output(changePumpPin, GPIO.LOW)
        message = "Schalte " + deviceName + " aus."
    # Refresh the cached on/off state of all lights and pumps.
    for lpin in lights:
        lights[lpin]['state'] = GPIO.input(lpin)
    for ppin in pumps:
        pumps[ppin]['state'] = GPIO.input(ppin)
    timelaps_folder_picture = lib.settings.timelaps_folder_picture
    Lastpic = get_latest_image(timelaps_folder_picture)
    Template_Pumps = {'pumps': pumps}
    Template_Lights = {'lights': lights}
    appversion: object = version.v_Version
    appname = version.v_Application
    Template_Sensor = SensorData("web")
    Template_Sensordaten = getdbdata()
    return render_template(returnto + '.html', title='Welcome', v_Application=appname, v_Version=appversion,
                           **Template_Pumps, **Template_Lights, **Template_Sensor, **Template_Sensordaten, lastpic=Lastpic)
@app.route('/')
@app.route('/index')
def index():
    """Dashboard homepage: current light/pump states, sensor data, last photo."""
    # Refresh the cached on/off state of all lights and pumps.
    for lpin in lights:
        lights[lpin]['state'] = GPIO.input(lpin)
    for ppin in pumps:
        pumps[ppin]['state'] = GPIO.input(ppin)
    timelaps_folder_picture = lib.settings.timelaps_folder_picture
    Lastpic = get_latest_image(timelaps_folder_picture)
    Template_Pumps = {'pumps': pumps}
    Template_Lights = {'lights': lights}
    Template_Sensor = SensorData("web")
    Template_Sensordaten = getdbdata()
    appversion: object = version.v_Version
    appname = version.v_Application
    return render_template('index.html', title='Welcome', v_Application=appname, v_Version=appversion,
                           **Template_Pumps, **Template_Lights, **Template_Sensor, **Template_Sensordaten, lastpic=Lastpic)
@app.route('/Light/')
def light():
    """Grow-light control page with current states and sensor data."""
    # Refresh the cached on/off state of all lights and pumps.
    for lpin in lights:
        lights[lpin]['state'] = GPIO.input(lpin)
    for ppin in pumps:
        pumps[ppin]['state'] = GPIO.input(ppin)
    timelaps_folder_picture = lib.settings.timelaps_folder_picture
    Lastpic = get_latest_image(timelaps_folder_picture)
    Template_Pumps = {'pumps': pumps}
    Template_Lights = {'lights': lights}
    Template_Sensor = SensorData("web")
    appversion: object = version.v_Version
    appname = version.v_Application
    Template_Sensordaten = getdbdata()
    return render_template('/light.html', title='Welcome', v_Application=appname, v_Version=appversion,
                           **Template_Pumps, **Template_Lights, **Template_Sensor, **Template_Sensordaten, lastpic=Lastpic)
@app.route('/Pumps/')
def pumpen():
    """Pump control page with current states and sensor data."""
    # Refresh the cached on/off state of all lights and pumps.
    for lpin in lights:
        lights[lpin]['state'] = GPIO.input(lpin)
    for ppin in pumps:
        pumps[ppin]['state'] = GPIO.input(ppin)
    timelaps_folder_picture = lib.settings.timelaps_folder_picture
    Lastpic = get_latest_image(timelaps_folder_picture)
    Template_Pumps = {'pumps': pumps}
    Template_Lights = {'lights': lights}
    Template_Sensor = SensorData("web")
    statusscreen()
    appversion: object = version.v_Version
    appname = version.v_Application
    Template_Sensordaten = getdbdata()
    # BUG FIX: previously used **Lastpic, which splats the image path and
    # fails at runtime; every sibling route passes lastpic=Lastpic instead.
    return render_template('/pumps.html', title='Welcome', v_Application=appname, v_Version=appversion,
                           **Template_Pumps, **Template_Lights, **Template_Sensor, **Template_Sensordaten, lastpic=Lastpic)
@app.route('/Sensors/')
def sensors():
    """Render the sensors page with fresh GPIO states and sensor readings.

    Cleanup: removed a leftover debug ``print`` of the raw DB sensor data.
    """
    # Refresh the cached GPIO state of every light and pump pin.
    for lpin in lights:
        lights[lpin]['state'] = GPIO.input(lpin)
    for ppin in pumps:
        pumps[ppin]['state'] = GPIO.input(ppin)
    console(modulname, "Rufe Sensordaten ab")
    timelaps_folder_picture = lib.settings.timelaps_folder_picture
    Lastpic = get_latest_image(timelaps_folder_picture)
    Template_Sensor = SensorData("web")
    Template_Pumps = {'pumps': pumps}
    Template_Lights = {'lights': lights}
    appversion: object = version.v_Version
    appname = version.v_Application
    Template_Sensordaten = getdbdata()
    return render_template('/sensors.html', title='Welcome', v_Application=appname, v_Version=appversion,
                           **Template_Pumps, **Template_Lights, **Template_Sensor, **Template_Sensordaten,
                           lastpic=Lastpic)
@app.route('/Stats/')
def stats():
    """Render the statistics page (historic sensor data from the database)."""
    # Refresh the cached GPIO state of every light and pump pin.
    for lpin in lights:
        lights[lpin]['state'] = GPIO.input(lpin)
    for ppin in pumps:
        pumps[ppin]['state'] = GPIO.input(ppin)
    timelaps_folder_picture = lib.settings.timelaps_folder_picture
    Lastpic = get_latest_image(timelaps_folder_picture)
    Template_Pumps = {'pumps': pumps}
    Template_Lights = {'lights': lights}
    Template_Sensor = SensorData("web")
    appversion: object = version.v_Version
    appname = version.v_Application
    console(modulname, "Statistik wird geladen ...")
    console(modulname, "Statistik wird ausgewertet und bereitgestellt ...")
    Template_Sensordaten = getdbdata()  # historic readings rendered by stats.html
    return render_template('/stats.html', title='Welcome', v_Application=appname, v_Version=appversion,
                           **Template_Pumps, **Template_Lights, **Template_Sensor,**Template_Sensordaten,lastpic=Lastpic)
@app.route('/system/restart')
def restartplanter():
    """Restart the planter application via its startup script, then show a wait page."""
    modulname="Systemdienst"
    # Refresh the cached GPIO state of every light and pump pin.
    for lpin in lights:
        lights[lpin]['state'] = GPIO.input(lpin)
    for ppin in pumps:
        pumps[ppin]['state'] = GPIO.input(ppin)
    timelaps_folder_picture = lib.settings.timelaps_folder_picture
    Lastpic = get_latest_image(timelaps_folder_picture)
    Template_Pumps = {'pumps': pumps}
    Template_Lights = {'lights': lights}
    Template_Sensor = SensorData("web")
    appversion: object = version.v_Version
    appname = version.v_Application
    console(modulname, "Statistik wird geladen ...")
    console(modulname, "Statistik wird ausgewertet und bereitgestellt ...")
    Template_Sensordaten = getdbdata()
    # NOTE(review): shell=True with a fixed command string is acceptable here
    # (no user input reaches the shell), but ./startup.sh must exist in the CWD.
    subprocess.Popen(['./startup.sh restart'], shell=True)
    return render_template('/wait.html', title='Welcome', v_Application=appname, v_Version=appversion,
                           **Template_Pumps, **Template_Lights, **Template_Sensor,**Template_Sensordaten,lastpic=Lastpic)
@app.route('/system/restartos')
def restartos():
    """Reboot the host operating system, then show the wait page.

    Consistency fix: the sibling route /system/restart renders the same
    wait.html template *with* ``lastpic``; this route omitted it, leaving the
    template variable undefined.  It is now passed as well.
    """
    modulname="Systemdienst"
    # Refresh the cached GPIO state of every light and pump pin.
    for lpin in lights:
        lights[lpin]['state'] = GPIO.input(lpin)
    for ppin in pumps:
        pumps[ppin]['state'] = GPIO.input(ppin)
    timelaps_folder_picture = lib.settings.timelaps_folder_picture
    Lastpic = get_latest_image(timelaps_folder_picture)
    Template_Pumps = {'pumps': pumps}
    Template_Lights = {'lights': lights}
    Template_Sensor = SensorData("web")
    appversion: object = version.v_Version
    appname = version.v_Application
    console(modulname, "Statistik wird geladen ...")
    console(modulname, "Statistik wird ausgewertet und bereitgestellt ...")
    Template_Sensordaten = getdbdata()
    # NOTE(review): requires the process to run with root privileges.
    subprocess.Popen(['reboot'], shell=True)
    return render_template('/wait.html', title='Welcome', v_Application=appname, v_Version=appversion,
                           **Template_Pumps, **Template_Lights, **Template_Sensor, **Template_Sensordaten,
                           lastpic=Lastpic)
@app.route('/Webcam/')
def webcam():
    """Render the webcam page: live stream host/port plus the time-lapse gallery.

    Fix: the UDP socket used to discover the LAN address was never closed,
    leaking one file descriptor per request; it is now a context manager.
    """
    # "Connecting" a UDP socket sends no packet — it only makes the OS pick
    # the outbound interface, whose address we reuse for the stream URL.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as probe:
        probe.connect(("8.8.8.8", 80))
        host_ip = probe.getsockname()[0]
    # Refresh the cached GPIO state of every light and pump pin.
    for lpin in lights:
        lights[lpin]['state'] = GPIO.input(lpin)
    for ppin in pumps:
        pumps[ppin]['state'] = GPIO.input(ppin)
    timelaps_folder_picture = lib.settings.timelaps_folder_picture
    Lastpic = get_latest_image(timelaps_folder_picture)
    allimages = getallimages(timelaps_folder_picture)
    Template_Pumps = {'pumps': pumps}
    Template_Lights = {'lights': lights}
    Template_Sensor = SensorData("web")
    appversion: object = version.v_Version
    appname = version.v_Application
    console(modulname, "Webcam Stream wurde gestartet")
    Template_Sensordaten = getdbdata()
    return render_template('/webcam.html', host=host_ip, port=websettings.s_server_port, title='Welcome',
                           v_Application=appname, v_Version=appversion,
                           **Template_Pumps, **Template_Lights, **Template_Sensor, **Template_Sensordaten,
                           lastpic=Lastpic, **allimages)
@app.route("/Webcam/attachment/get/<filename>")
def emailattchment(filename):
    """Return the base64 encoding of a time-lapse image (for e-mail attachments).

    Cleanup: the original opened (and leaked) a UDP socket and computed a
    full template context (latest picture, gallery, sensor data, DB rows)
    that the response never used; all of that dead work has been removed.
    The GPIO refresh loops are kept because they update the global
    pin-state caches.  The debug print of the filename was removed.
    NOTE(review): `filename` comes straight from the URL — confirm that
    get_base64_encoded_image is safe against path traversal.
    """
    for lpin in lights:
        lights[lpin]['state'] = GPIO.input(lpin)
    for ppin in pumps:
        pumps[ppin]['state'] = GPIO.input(ppin)
    console(modulname, "Webcam Stream wurde gestartet")
    return get_base64_encoded_image(filename)
if __name__ == "__main__":
    console(modulname, "Webserver startet bitte warten!")
    # UDP "connect" to a public address only to discover this host's LAN IP;
    # no packet is sent.  NOTE(review): the socket is never closed, but the
    # process blocks inside app.run() for its whole lifetime anyway.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))
    console(modulname, "Web-Dashboard wurde gestartet")
    app.run(host=s.getsockname()[0], port=websettings.s_server_port)
| StarcoderdataPython |
3237581 | import smtplib
import random
import time

import playsound
from pytube import YouTube
def email_bot(send_addr, password, recv_addr, body, server, port, sub='No Subject'):
    """Send a plain-text e-mail over SMTP with STARTTLS and return True.

    The message is a raw "Subject: ..." header followed by the body.
    """
    message = 'Subject: {}'.format(sub) + '\n\n {}'.format(body)
    with smtplib.SMTP(server, port) as smtp:
        smtp.ehlo()
        smtp.starttls()
        smtp.ehlo()  # re-identify after the TLS upgrade
        smtp.login(send_addr, password)
        smtp.sendmail(send_addr, recv_addr, message)
    return True
def email_address_slicer(full_addr):
    """Split an e-mail address into (username, domain).

    Splits on the *last* '@' (rsplit), so a quoted local part containing
    '@' no longer truncates the domain.  An address without '@' still
    raises IndexError, as before.
    """
    parts = full_addr.rsplit('@', 1)
    return parts[0], parts[1]
def yt_downloader(video_url):
    """Download the first available stream of a YouTube video.

    Returns True when pytube's download() reports success (truthy result),
    False otherwise.
    """
    stream = YouTube(video_url).streams.first()
    return bool(stream.download())
def roll_dice(dice_1=True):
    """Roll a die: faces 1-6 when dice_1 is True, 1-12 otherwise."""
    faces = list(range(1, 7)) if dice_1 else list(range(1, 13))
    return random.choice(faces)
def timer(seconds, audio_file):
    """Count down `seconds` real seconds, then play `audio_file` once.

    Bug fixed: the original never slept — it only decremented a local
    counter in a tight loop — and its ``time <= 0`` check ran *before* the
    decrement, so it could never trigger inside the loop and the sound
    never played.
    """
    for _ in range(seconds):
        time.sleep(1)
    playsound.playsound(audio_file)
| StarcoderdataPython |
3385135 | <gh_stars>1-10
from STL_control import *
from rci_family import *
import math as m
# --- Build the STL-constrained system: 4 states, 2 controls, 16 predicates ---
pi=3.1415
s=STL_system(4,2,16)
s.matrix_installation()
# Double-integrator dynamics: states are (x, vx, y, vy); A is the
# discrete-time update matrix and B maps the two control inputs.
s.A[1,1]=1
s.A[1,2]=1
s.A[2,2]=1
s.A[3,3]=1
s.A[3,4]=1
s.A[4,4]=1
s.B[2,1]=1
s.B[4,2]=1
s.T=25
s.add_variables()
# Half-space predicates over the state; the inline comments give the
# inequality each signal encodes.
s.add_secondary_signal_state(1,[0,0,-1,0],4) # -y+4>0 --> 4>y
s.add_secondary_signal_state(2,[1,0,0,0],-7) # x-7>0 --> x>7
s.add_secondary_signal_state(3,[0,0,1,0],-8) # y-8>0 --> y>8
s.add_secondary_signal_state(4,[-1,0,0,0],3) # -x+3>0 --> 3>x
s.add_secondary_signal_state(5,[0,0,1,0],-10) # y-10>0 --> y>10
s.add_secondary_signal_state(6,[-1,0,0,0],11) #-x+11>0 --> 11>x
s.add_secondary_signal_state(7,[0,0,-1,0],11) # -y+11>0 --> 11>y
s.add_secondary_signal_state(8,[1,0,0,0],-10) # x-10>0 --> x>10
s.add_secondary_signal_state(9,[1,0,0,0],-1) # x-1>0 --> x>1
s.add_secondary_signal_state(10,[-1,0,0,0],2) # -x+2>0 --> 2>x
s.add_secondary_signal_state(11,[0,0,1,0],-7) # y-7>0 --> y>7
s.add_secondary_signal_state(12,[0,0,-1,0],8) # -y +8 > 0 --> 8>y
# Control-bound predicates (|u| bounded by 1 in each channel).
s.add_secondary_signal_control(13,[1,0],1) # u+100>0 --> u>-100
s.add_secondary_signal_control(14,[-1,0],1) # -u+100>0 --> 100>u
s.add_secondary_signal_control(15,[0,1],1)
s.add_secondary_signal_control(16,[0,-1],1)
# STL formulas: stay outside the obstacle box at all times, eventually
# reach the "discover" box (by t=20), then the "upload" box, and keep the
# controls within bounds for the whole horizon.
s.add_formula("obstacle")
s.disjunction("obstacle",[1,2,3,4])
s.add_formula("discover")
s.conjunction("discover",[5,6,7,8])
s.add_formula("upload")
s.conjunction("upload",[9,10,11,12])
s.add_formula("controls")
s.conjunction("controls",[13,14,15,16])
s.add_formula("phi_1")
s.always("phi_1","obstacle",range(0,s.T))
s.add_formula("phi_2")
s.eventually("phi_2","discover",range(0,20))
s.add_formula("phi_3")
s.eventually("phi_3","upload",range(20,s.T))
s.add_formula("phi_4")
s.always("phi_4","controls",range(0,s.T))
s.add_formula("phi_whole")
s.conjunction("phi_whole",["phi_1","phi_2","phi_3","phi_4"])
# Encode as a mixed-integer program and solve for the nominal trajectory.
s.initial_condition([0,0,0,0])
s.integer_encoding()
s.solve("phi_whole")
s.write_to_file()
# print "robustness was",s.r.X
# for t in range(0,s.T):
# print t,"x:",s.x[1,t].X,"vx:",s.x[2,t].X," y:",s.x[3,t].X,"vy:",s.x[4,t].X, "ux:", s.u[1,t].X, "uy:", s.u[2,t].X
# print t, "z upload", s.z["upload",t].X, "z obstacle", s.z["obstacle",t].X, "z discover", s.z["discover",t].X
# print "\n"
"""
Here I start computing the tube!
Then we will add the tube to the nominal trajectory
"""
# --- Robust control invariant (RCI) tube around the nominal trajectory ---
tube=system()
tube.n=s.n # number of variables
tube.m=s.m # number of controls
tube.K=10 # Design variable, degree
tube.nW=8 # Number of dis set rows
tube.nX=24 # rows of X, rows of H
tube.nU=8 # rows of U, rows of P
tube.A=s.A
tube.B=s.B
# Disturbance set F w <= g: box of half-width 0.1*scale_w in each state.
tube.F={}
tube.g={}
scale_w=1
for i in range(1,s.n+1):
    tube.F[2*i-1,i]=1
    tube.F[2*i,i]=-1
    tube.g[2*i-1]=0.1*scale_w
    tube.g[2*i]=0.1*scale_w
tube.F=complete_matrix(tube.F)
# Reuse the state/control constraint sets of the STL problem.
tube.H=s.Ex
tube.r=s.ex
tube.P=s.Fu
tube.q=s.fu
tube.compute_AA()
tube.compute_HAB()
tube.compute_FAB()
# Center the tube at the origin (mu = 0, v = 0) and synthesize the RCI set.
for i in range(1,tube.n+1):
    tube.mu[i]=0
for j in range(1,tube.m+1):
    tube.v[j]=0
tube.RCI()
tube.compute_D()
# Sample unit directions on a 3-sphere grid (N^3 angle combinations) and
# collect the distinct RCI vertices found along each direction.
d={}
N=8
vertices=[]
for theta_1 in range(0,N):
    for theta_2 in range(0,N):
        for theta_3 in range(0,N):
            scale=2*pi/N
            d[1]=m.cos(theta_1*scale)*m.cos(theta_2*scale)*m.cos(theta_3*scale)
            d[2]=m.sin(theta_1*scale)*m.cos(theta_2*scale)*m.cos(theta_3*scale)
            d[3]=m.sin(theta_2*scale)*m.cos(theta_3*scale)
            d[4]=m.sin(theta_3*scale)
            v=tube.RCI_vertex(d)
            if not v in vertices:
                vertices.append(v)
# Dump the vertices (one per line, 1-indexed components) for plotting.
f=open("tube_vertices.txt","w")
for v in vertices:
    for i in range(1,len(v)+1):
        f.write("%0.2f "%v[i])
    f.write("\n")
f.close()
# Simulate the closed-loop tube state under the RCI controller and log it.
f=open("tube_state.txt","w")
tube.x=tube.mu
for t in range(0,s.T+1):
    for i in range(1,tube.n+1):
        f.write("%0.2f "%tube.x[i])
    f.write("\n")
    tube.RCI_control(tube.x)
    tube.evolve()
f.close()
print "\nbeta is",tube.beta
print "gamma is",tube.gamma
1628671 | <filename>topCoder/srms/100s/srm146/div2/rectangular_grid.py
class RectangularGrid:
    """TopCoder SRM 146 div2: count the non-square sub-rectangles of a grid."""

    def countRectangles(self, width, height):
        """Return the number of axis-aligned sub-rectangles of a
        width x height grid that are not squares.

        A w x h sub-rectangle fits in (width-w+1)*(height-h+1) positions;
        squares (w == h) are excluded.
        """
        # `range` (not the Python-2-only `xrange`) yields the same values on
        # both Python 2 and 3; grid sizes are small, so this is cheap.
        return sum(
            (width - i + 1) * (height - j + 1)
            for i in range(1, width + 1)
            for j in range(1, height + 1)
            if i != j
        )
| StarcoderdataPython |
151559 | <filename>One/script.py
file = open("input.txt", "r")  # puzzle input; NOTE: shadows the builtin `file` (Python 2)
input = file.next()  # Python-2 file iterator; NOTE: shadows the builtin `input`
sequence = input.split(", ")  # moves like "R5", "L3"
class walker:
    """Taxicab walker for Advent of Code 2016 day 1.

    Tracks a grid position, a facing direction (0=N, 1=E, 2=S, 3=W) and
    every tile visited, so the first *revisited* tile can be detected.
    """

    def __init__(self):
        self.east = 0    # +east / -west offset from the start
        self.south = 0   # +south / -north offset from the start
        self.facing = 0  # 0=N, 1=E, 2=S, 3=W
        self.tiles = {}  # visited tiles: {east: [south, ...]}
        # Bug fix: the starting tile was never recorded, so a path that
        # came back to the origin was not detected as a revisit.
        self.addTile(self.east, self.south)

    def turnL(self):
        """Rotate 90 degrees counter-clockwise."""
        self.facing = (self.facing - 1) % 4

    def turnR(self):
        """Rotate 90 degrees clockwise."""
        self.facing = (self.facing + 1) % 4

    def walk(self, dist):
        """Advance `dist` tiles in the current facing, one tile at a time.

        Returns True as soon as a previously visited tile is stepped on
        (AoC part 2), otherwise False after recording every tile walked.
        """
        for _ in range(dist):
            if self.facing == 0:
                self.south -= 1
            elif self.facing == 1:
                self.east += 1
            elif self.facing == 2:
                self.south += 1
            else:
                self.east -= 1
            if self.kek():
                return True
            self.addTile(self.east, self.south)
        return False

    def totalDist(self):
        """Manhattan distance from the starting point."""
        return abs(self.east) + abs(self.south)

    def addTile(self, x, y):
        """Record tile (x, y) as visited."""
        self.tiles.setdefault(x, []).append(y)

    def kek(self):
        """Return True if the current tile has been visited before."""
        return self.south in self.tiles.get(self.east, [])
w = walker()
# Follow each instruction: turn, then walk; stop at the first revisited tile
# (the Advent of Code part-2 answer is the distance to that tile).
for s in sequence:
    if s[0] == "R":
        w.turnR()
    else:
        w.turnL()
    if w.walk(int(s[1:])):
        break
print w.totalDist()  # Python-2 print statement
3248577 | <filename>pyoneer/metrics/metrics_test.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from pyoneer import metrics
class MetricsTest(tf.test.TestCase):
    """Checks the pyoneer MAPE/SMAPE functional and stateful metric APIs."""

    def test_mape_fn(self):
        """Functional MAPE over a 3x2 batch of targets/predictions."""
        targets = tf.constant([[0.2, 0.1], [0.3, 0.2], [0.1, 0.2]], dtype=tf.float32)
        preds = tf.constant([[0.1, 0.1], [0.2, 0.1], [0.2, 0.2]], dtype=tf.float32)
        expected = tf.constant([0.25, 0.416667, 0.5], dtype=tf.float32)
        self.assertAllClose(metrics.mape(targets, preds), expected)

    def test_smape_fn(self):
        """Functional SMAPE over the same 3x2 batch."""
        targets = tf.constant([[0.2, 0.1], [0.3, 0.2], [0.1, 0.2]], dtype=tf.float32)
        preds = tf.constant([[0.1, 0.1], [0.2, 0.1], [0.2, 0.2]], dtype=tf.float32)
        expected = tf.constant([0.333333, 0.533333, 0.333333], dtype=tf.float32)
        self.assertAllClose(metrics.smape(targets, preds), expected)

    def test_mape(self):
        """Stateful MAPE metric with a zeroed-out middle sample weight."""
        targets = tf.constant([[0.2, 0.1], [0.3, 0.2], [0.1, 0.2]], dtype=tf.float32)
        preds = tf.constant([[0.1, 0.1], [0.2, 0.1], [0.2, 0.2]], dtype=tf.float32)
        weights = tf.constant([1.0, 0.0, 1.0], dtype=tf.float32)
        metric = metrics.MAPE()
        metric.update_state(targets, preds, sample_weight=weights)
        self.assertAllClose(metric.result(), 0.375)

    def test_smape(self):
        """Stateful SMAPE metric with a zeroed-out middle sample weight."""
        targets = tf.constant([[0.3, 0.1], [0.3, 0.3], [0.1, 0.2]], dtype=tf.float32)
        preds = tf.constant([[0.1, 0.1], [0.2, 0.1], [0.3, 0.2]], dtype=tf.float32)
        weights = tf.constant([1.0, 0.0, 1.0], dtype=tf.float32)
        metric = metrics.SMAPE()
        metric.update_state(targets, preds, sample_weight=weights)
        self.assertAllClose(metric.result(), 0.5)
if __name__ == "__main__":
    # Run the TensorFlow test suite when executed directly.
    tf.test.main()
| StarcoderdataPython |
3273620 | <filename>flask_app/dash/orange/functions.py
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from wordcloud import WordCloud
from io import BytesIO
import base64
from flask_app.helpers.graphs import multi_color_func
from flask_app.helpers.colors import plasma_15
from flask_app.helpers.lo_columns import columns, names
import matplotlib.pyplot as plt
import plotly.graph_objs as go
import plotly.express as px
from dash_table import DataTable
import numpy as np
from flask_app.helpers.preprocess_text import get_top_n_words, get_top_n2_words
import re
import ast
from config import basedir
from os import path
pd.options.display.max_colwidth = None  # never truncate cell text in pandas output
cols_dict = dict(zip(columns, names))  # column id -> human-readable label
def get_value_counts(df, column, match=None):
    """Value counts of `column`; when `match` is truthy, only its count."""
    counts = df[column].value_counts()
    return counts[match] if match else counts
def make_local_df(df, column, choice):
    """Rows of `df` whose `column` equals `choice` (e.g. a single supplier)."""
    mask = df[column] == choice
    return df[mask]
def make_counter_row(df, column, name):
    """Build a dbc.Row showing how many unique values `column` holds.

    The "lo_id" counter gets a trailing black rule and an offset column;
    every other counter gets a leading orange rule instead.
    """
    count_heading = html.H5(
        ["\u00A0%s" % len(list(df[column].unique()))], id=f"{column}_qty"
    )
    name_heading = html.H5(
        ["\u00A0" + name.upper() + "\u00A0"], id=f"{column}_counter_name"
    )
    if column == "lo_id":
        orange_line = None
        black_line = dbc.Col([html.Hr(id="hr_black")], className="px-0")
        qty = dbc.Col(
            [count_heading, name_heading],
            width={"offset": 2},
            className="d-flex flex-row align-items-center h-50"
        )
    else:
        orange_line = dbc.Col([html.Hr(id="hr_orange")], className="px-0 col-2")
        black_line = None
        qty = dbc.Col(
            [count_heading, name_heading],
            className="d-flex flex-row align-items-center pl-0 h-50"
        )
    return dbc.Row(
        [orange_line, qty, black_line],
        style={"height": "25%"},
        className="d-flex align-items-center"
    )
def make_footer(content):
    """Card footer containing a single centered paragraph."""
    return dbc.CardFooter(
        [html.P(content)],
        className="w-100 text-center py-2 border-0"
    )
def make_dd(df, column):
    """Column wrapping a supplier dropdown fed by the unique values of `column`.

    An "ALL" entry is prepended so the user can clear the filter.
    """
    options = [{"label": value, "value": value}
               for value in df[column].dropna().unique()]
    options.insert(0, {"label": "tous les fournisseurs".upper(), "value": "ALL"})
    dropdown = dcc.Dropdown(
        id=f"{column}_drop",
        options=options,
        style={
            "width": "100%",
            "color": "#000",
            "display": "inline-block",
            "fontWeight": "900"
        },
        clearable=False
    )
    return dbc.Col(
        [dropdown],
        style={},
        className="d-flex flex-column align-items-center justify-content-center my-1 w-100"
    )
def make_cloud(df, column, view_env, from_frequencies=False, stopwords=[]):
    """Render a word cloud from `df[column]`.

    With from_frequencies=False the column is treated as raw text (string or
    token list per row); with True, value counts of the column become word
    frequencies.  `view_env` selects the output: "notebook" shows a
    matplotlib figure, "dash" returns a base64 data URI; any other value
    returns None.
    NOTE(review): `stopwords=[]` is a mutable default — harmless here since
    it is only read, but worth confirming.
    """
    font_path = path.join(basedir, "flask_app/static/fonts/coolvetica_rg.ttf")
    if not from_frequencies:
        # Normalize each row to a single whitespace-joined string.
        corpus = [' '.join(vocab.split()) if isinstance(vocab, str) else ' '.join(vocab) for vocab in df[column]]
        cloud = WordCloud(
            background_color="white",
            font_path=font_path,
            stopwords=set(stopwords),
            max_words=1000,
            random_state=0,
            max_font_size=90,
            contour_width=1,
            collocations=False,
            height=150
        ).generate(' '.join(corpus))
    elif from_frequencies:
        cloud = WordCloud(
            background_color="white",
            font_path=font_path,
            stopwords=set(stopwords),
            max_words=1000,
            random_state=0,
            max_font_size=90,
            contour_width=1,
            collocations=False,
        ).generate_from_frequencies(df[column].value_counts())
    # Apply the project's multi-color palette.
    cloud.recolor(color_func=multi_color_func)
    if view_env == "notebook":
        fig = plt.figure(figsize=[14, 7])
        ax = plt.imshow(cloud, interpolation="bilinear")
        plt.axis("off")
        return fig.show()
    elif view_env == "dash":
        # Serialize the image as an inline data URI for an html.Img src.
        img = BytesIO()
        cloud.to_image().save(img, format='PNG')
        src = "data:image/png;base64,{}".format(base64.b64encode(img.getvalue()).decode())
        return src
def make_histo(df, column, n=10):
    """Bar chart of the `n` most frequent values of `column`.

    Bars are labeled with their counts; x tick labels are hidden because the
    hover tooltip carries the category name.
    """
    x = df[column].value_counts().keys().tolist()[:n]
    y = df[column].value_counts().values.tolist()[:n]
    fig = go.Figure(
        data=[
            go.Bar(
                x=x,
                y=y,
                text=y,
                insidetextfont=dict(color="#fff"),
                # textfont=dict(size=20),
                hovertemplate='<b>%{label}</b><br>%{value}<extra></extra>',
                textposition='inside',
                marker=dict(
                    # One palette color per bar.
                    color=[plasma_15[i] for i in range(n)],
                    line=dict(
                        color='#000000',
                        width=1
                    )
                )
            )
        ],
        layout=go.Layout(
            # Transparent background so the chart blends into the card.
            paper_bgcolor='rgba(0,0,0,0)',
            plot_bgcolor='rgba(0,0,0,0)',
            autosize=True,
            showlegend=False,
            margin=dict(t=0, b=0, l=0, r=0),
            xaxis=dict(showticklabels=False),
            hoverlabel=dict(
                bgcolor="#000",
                font=dict(
                    family="Helvetica",
                    # size=22,
                    color="white",
                )
            )
        )
    )
    return fig
def make_pie(df, column, n=7, showlegend=False):
    """Pie chart of the `n` most frequent values of `column`.

    The palette slice depends on the column so related charts stay visually
    distinct.  NOTE: labels use dropna() while values do not, but
    value_counts() drops NaN by default, so the two lists stay aligned.
    """
    labels = df[column].dropna().value_counts().keys().tolist()[:n]
    # labels = [label[:12] for label in labels]
    values = df[column].value_counts().values.tolist()[:n]
    if column == "active_status":
        colors = plasma_15
    elif column == "lo_type":
        colors = plasma_15[2:]
    else:
        colors = plasma_15[::-1]
    # random.shuffle(colors)
    fig = go.Figure(
        data=[
            go.Pie(
                labels=labels,
                values=values,
                textinfo='percent',
                hovertemplate='<b>%{label}</b><br>%{value} L.O.<extra></extra>',
                insidetextorientation="radial",
                textfont=dict(size=16),
                textposition='inside',
                marker=dict(
                    colors=[colors[i] for i in range(n)],
                    line=dict(
                        color='#000000',
                        width=1
                    )
                )
            )
        ],
        layout=go.Layout(
            # Transparent background so the chart blends into the card.
            paper_bgcolor='rgba(0,0,0,0)',
            plot_bgcolor='rgba(0,0,0,0)',
            autosize=True,
            showlegend=showlegend,
            margin=dict(t=0, b=0, l=0, r=0),
            legend=dict(
                orientation='v',
                font=dict(size=14),
                yanchor="middle",
                xanchor="left",
                y=0.5,
                x=1
            ),
            hoverlabel=dict(
                bgcolor="#000",
                font=dict(
                    family="Helvetica",
                    # size=18,
                    color="white",
                )
            )
        )
    )
    return fig
def make_sun(df, ind, col_1, col_2, reverse=False):
    """Two-level sunburst of L.O. counts grouped by `col_1` / `col_2`.

    `reverse=True` swaps the inner and outer rings.

    Fixes: the original mutated the *caller's* DataFrame by adding an empty
    'tx' column that was never read, and re-assigned `sun_df.columns` to the
    names it already had; both have been removed.
    """
    sun_df = df[[ind, col_1, col_2]].copy()
    # Count L.O. rows per (col_1, col_2) pair.
    pivot_sun = sun_df.pivot_table(index=[col_1, col_2], values=ind, aggfunc='count', margins=False)
    ndf = pd.DataFrame(pivot_sun.to_records())
    if reverse:
        fig = px.sunburst(ndf, path=[col_2, col_1], values=ind,
                          color_discrete_sequence=plasma_15[2:])
    else:
        fig = px.sunburst(ndf, path=[col_1, col_2], values=ind,
                          color_discrete_sequence=plasma_15)
    fig.update_traces(
        textinfo='label+percent entry',
        insidetextorientation='radial',
        textfont_size=18,
        hovertemplate='<b>%{parent}</b><br><b>%{label}</b><br>Nb of L.O.: %{value}'
    )
    fig.update_layout(
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(0,0,0,0)',
        autosize=True,
        title_font_size=14,
        margin=dict(t=0, l=0, r=0, b=0),
        hoverlabel=dict(
            bgcolor="#000",
            font=dict(
                family="Coolvetica",
                color="white"
            )
        )
    )
    return fig
def make_graduated_bar(dtf, column):
    """Fill rate of `column` on a 0-10 scale (10 = no missing values).

    Uses Series.isna().mean() (fraction of missing rows) instead of the
    original boolean-mask/len arithmetic; an empty frame now yields NaN
    rather than raising ZeroDivisionError.
    """
    return (1 - dtf[column].isna().mean()) * 10
def make_recap_table(df, ind, col_1, col_2):
    """DataTable of the inactive rate ("taux inactif") per `col_2` category.

    Pivots counts of `ind` by `col_1` (assumed to hold 0/1 active flags,
    with margins) then computes inactive / (inactive + active) per row.
    NOTE(review): assumes both 0 and 1 columns exist in the pivot — a frame
    with only one flag value would raise a KeyError.
    """
    recap_df = df.pivot_table(index=[col_2], values=[ind], columns=col_1, aggfunc='count', margins=True)
    # Percentage of inactive (flag 0) rows per category, rendered as "NN%".
    recap_df['taux inactif'] = [round(recap_df.loc[lo_type, (ind, 0)] / (
            recap_df.loc[lo_type, (ind, 0)] + recap_df.loc[lo_type, (ind, 1)]) * 100, 0) for
                                lo_type in recap_df.index]
    recap_df['taux inactif'] = recap_df['taux inactif'].apply(lambda x: f'{int(x)}%' if not np.isnan(x) else 0)
    for column in [1, 0, 'All']:
        recap_df.loc[:, (ind, column)] = recap_df.loc[:, (ind, column)].apply(
            lambda x: f'{int(x)}' if not np.isnan(x) else 0)
    recap_df = pd.DataFrame(recap_df.to_records())
    recap_df[col_2] = recap_df[col_2].apply(lambda x: x.upper())
    recap_df.columns = ['', 'active', 'not_active', 'All', 'taux inactif']
    # Only the category name and the inactive rate are displayed.
    recap_df = recap_df[['', 'taux inactif']].copy()
    # NOTE(review): set_index is not in-place and its return value is
    # discarded — this line currently has no effect.
    recap_df.set_index('', drop=True)
    table = DataTable(
        id=f"{col_1}_{col_2}_table",
        data=recap_df.to_dict('records'),
        columns=[{'id': c, 'name': c.upper()} for c in recap_df.columns],
        style_as_list_view=True,
        style_data={
            "lineHeight": "1vmin"
        },
        style_cell_conditional=[
            {
                'if': {'column_id': 'taux inactif'},
                'textAlign': 'center',
            }
        ],
        style_data_conditional=[
            {
                'if': {'row_index': 'odd'},
                'backgroundColor': '#ddd'
            }
        ],
        style_header={
            'backgroundColor': '#ff7900',
            'fontWeight': 'bold'
        }
    )
    return table
def make_top_words(df, grams, stopwords=[]):
    """Horizontal bar chart of the 10 most frequent uni- or bi-grams.

    `grams` is "uni" or "bi"; any other value returns None (matching the
    original implicit fall-through).  The two branches of the original were
    28 duplicated lines differing only in the counting function and the
    bi-gram chart height; they are now merged.
    NOTE: the mutable default `stopwords=[]` is kept for interface
    compatibility; it is only read, never mutated.
    """
    n = 10
    docs = [' '.join(vocab_list.split()) for vocab_list in df['lemma_lo_description']]
    colors = plasma_15[:n]
    colors.reverse()
    if grams == 'uni':
        counter = get_top_n_words
        layout = {"margin": dict(t=0, b=20, l=100, r=0, pad=0)}
    elif grams == 'bi':
        counter = get_top_n2_words
        layout = {"margin": dict(t=0, b=20, l=100, r=0, pad=0), "height": 300}
    else:
        return None
    top_df = pd.DataFrame(counter(docs, stopwords, n))
    top_df.columns = ["Word", "Freq"]
    words = top_df["Word"][:n].to_list()
    words.reverse()
    freqs = top_df["Freq"][:n].to_list()
    freqs.reverse()
    figure = {
        "data": [
            {
                "y": words,
                "x": freqs,
                "type": "bar",
                "name": "",
                "orientation": "h",
                'marker': {'color': colors}
            }
        ],
        "layout": layout,
    }
    return go.Figure(figure)
def keywords_to_df(keywords):
    """Parse a dict-literal string {keyword: tfidf score} into a DataFrame.

    Bug fixed: the original first replaced every single quote with a double
    quote, which corrupted any keyword containing an apostrophe (e.g.
    "l'eau") and was unnecessary — ast.literal_eval already parses
    single-quoted Python literals.
    """
    kw_dict = ast.literal_eval(keywords)
    kwdf = pd.DataFrame.from_dict(kw_dict, orient="index")
    kwdf.reset_index(drop=False, inplace=True)
    kwdf.columns = ["keyword", "score tfidf"]
    return kwdf
| StarcoderdataPython |
118914 | <gh_stars>0
import unittest
from alfred_client.message import PushData
from alfred_client.packet.struct import alfred_packet
# Fixtures shared by both test cases below.
expected_mac = '5e:5c:ce:ca:93:58'  # source MAC embedded in the packet
expected_message = 'raspberrypi\n'  # alfred data payload carried by the packet
expected_length = len(expected_message)
# Wire representation of a PushData packet carrying the payload above.
expected_bytes = b'\x00\x00\x00\x1a\x00\x00\x00\x00^\\\xce\xca\x93Xc\x00\x00\x0craspberrypi\n'  # noqa
class TestCreatePushData(unittest.TestCase):
    """Builds a PushData message from scratch and checks its serialization."""

    def setUp(self):
        self.message = PushData()
        self.message.source_mac_address = expected_mac

    def test_message_type(self):
        # PushData packets use alfred message type 0.
        self.assertEqual(self.message.type, 0)

    def test_source_mac_address(self):
        self.assertEqual(self.message.source_mac_address, expected_mac)

    def test_add_data_block(self):
        # Adding a data block must record a 'length' field for the payload.
        self.message.add_data_block(99, 0, expected_message)
        self.assertIn('length', self.message._data[0])

    def test_calculates_correct_data_value_length(self):
        self.message.add_data_block(99, 0, expected_message)
        self.assertEqual(self.message._data[0]['length'], expected_length)

    def test_calculates_correct_message_length(self):
        # 26 bytes total: header + data-block header + 12-byte payload.
        self.message.add_data_block(99, 0, expected_message)
        self.assertEqual(self.message.length, 26)

    def test_bytes(self):
        # Serialization must match the captured wire bytes exactly.
        self.message.add_data_block(99, 0, expected_message)
        self.assertEqual(bytes(self.message), expected_bytes)
class TestParsePushData(unittest.TestCase):
    """Parses the captured wire bytes back into a PushData message."""

    def setUp(self):
        self.container = alfred_packet.parse(expected_bytes)
        self.message = PushData(self.container)

    def test_message_type(self):
        self.assertEqual(self.message.type, 0)

    def test_message_version(self):
        self.assertEqual(self.message.version, 0)

    def test_message_length(self):
        self.assertEqual(self.message.length, 26)

    def test_transaction_id(self):
        self.assertEqual(self.message.transaction_id, 0)

    def test_sequence_number(self):
        self.assertEqual(self.message.sequence_number, 0)

    @unittest.skip('TODO: move _data to packet_body.alfred_data')
    def test_alfred_data_length(self):
        self.assertEqual(len(self.message.alfred_data), 1)
| StarcoderdataPython |
91064 | <gh_stars>0
from . import *
class AWS_KinesisAnalytics_ApplicationOutput_KinesisFirehoseOutput(CloudFormationProperty):
  """Writes the Terraform `kinesis_firehose_output` block (ResourceARN/RoleARN)."""
  def write(self, w):
    with w.block("kinesis_firehose_output"):
      self.property(w, "ResourceARN", "resource_arn", StringValueConverter())
      self.property(w, "RoleARN", "role_arn", StringValueConverter())
class AWS_KinesisAnalytics_Application_CSVMappingParameters(CloudFormationProperty):
  """Writes the Terraform `csv_mapping_parameters` block (row/column delimiters)."""
  def write(self, w):
    with w.block("csv_mapping_parameters"):
      self.property(w, "RecordRowDelimiter", "record_row_delimiter", StringValueConverter())
      self.property(w, "RecordColumnDelimiter", "record_column_delimiter", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_CSVMappingParameters(CloudFormationProperty):
  """Writes the `csv_mapping_parameters` block for a reference data source."""
  def write(self, w):
    with w.block("csv_mapping_parameters"):
      self.property(w, "RecordRowDelimiter", "record_row_delimiter", StringValueConverter())
      self.property(w, "RecordColumnDelimiter", "record_column_delimiter", StringValueConverter())
class AWS_KinesisAnalytics_Application_JSONMappingParameters(CloudFormationProperty):
  """Writes the Terraform `json_mapping_parameters` block (record row path)."""
  def write(self, w):
    with w.block("json_mapping_parameters"):
      self.property(w, "RecordRowPath", "record_row_path", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationOutput_DestinationSchema(CloudFormationProperty):
  """Writes the Terraform `destination_schema` block (record format type)."""
  def write(self, w):
    with w.block("destination_schema"):
      self.property(w, "RecordFormatType", "record_format_type", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_S3ReferenceDataSource(CloudFormationProperty):
  """Writes the Terraform `s3_reference_data_source` block (bucket, key, role)."""
  def write(self, w):
    with w.block("s3_reference_data_source"):
      self.property(w, "BucketARN", "bucket_arn", StringValueConverter())
      self.property(w, "FileKey", "file_key", StringValueConverter())
      self.property(w, "ReferenceRoleARN", "reference_role_arn", StringValueConverter())
class AWS_KinesisAnalytics_Application_MappingParameters(CloudFormationProperty):
  """Writes the Terraform `mapping_parameters` block (JSON or CSV mapping)."""
  def write(self, w):
    with w.block("mapping_parameters"):
      self.block(w, "JSONMappingParameters", AWS_KinesisAnalytics_Application_JSONMappingParameters)
      self.block(w, "CSVMappingParameters", AWS_KinesisAnalytics_Application_CSVMappingParameters)
class AWS_KinesisAnalytics_ApplicationOutput_KinesisStreamsOutput(CloudFormationProperty):
  """Writes the Terraform `kinesis_streams_output` block (ResourceARN/RoleARN)."""
  def write(self, w):
    with w.block("kinesis_streams_output"):
      self.property(w, "ResourceARN", "resource_arn", StringValueConverter())
      self.property(w, "RoleARN", "role_arn", StringValueConverter())
class AWS_KinesisAnalytics_Application_KinesisStreamsInput(CloudFormationProperty):
  """Writes the Terraform `kinesis_streams_input` block (ResourceARN/RoleARN)."""
  def write(self, w):
    with w.block("kinesis_streams_input"):
      self.property(w, "ResourceARN", "resource_arn", StringValueConverter())
      self.property(w, "RoleARN", "role_arn", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_JSONMappingParameters(CloudFormationProperty):
  """Writes the `json_mapping_parameters` block for a reference data source."""
  def write(self, w):
    with w.block("json_mapping_parameters"):
      self.property(w, "RecordRowPath", "record_row_path", StringValueConverter())
class AWS_KinesisAnalytics_Application_RecordColumn(CloudFormationProperty):
  """Writes a Terraform `record_column` block (mapping, SQL type, name)."""
  def write(self, w):
    with w.block("record_column"):
      self.property(w, "Mapping", "mapping", StringValueConverter())
      self.property(w, "SqlType", "sql_type", StringValueConverter())
      self.property(w, "Name", "name", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_RecordColumn(CloudFormationProperty):
  """Writes a `record_column` block for a reference data source."""
  def write(self, w):
    with w.block("record_column"):
      self.property(w, "Mapping", "mapping", StringValueConverter())
      self.property(w, "SqlType", "sql_type", StringValueConverter())
      self.property(w, "Name", "name", StringValueConverter())
class AWS_KinesisAnalytics_Application_RecordFormat(CloudFormationProperty):
  """Writes the Terraform `record_format` block (mapping parameters + type)."""
  def write(self, w):
    with w.block("record_format"):
      self.block(w, "MappingParameters", AWS_KinesisAnalytics_Application_MappingParameters)
      self.property(w, "RecordFormatType", "record_format_type", StringValueConverter())
class AWS_KinesisAnalytics_Application_KinesisFirehoseInput(CloudFormationProperty):
  """Writes the Terraform `kinesis_firehose_input` block (ResourceARN/RoleARN)."""
  def write(self, w):
    with w.block("kinesis_firehose_input"):
      self.property(w, "ResourceARN", "resource_arn", StringValueConverter())
      self.property(w, "RoleARN", "role_arn", StringValueConverter())
class AWS_KinesisAnalytics_Application_InputParallelism(CloudFormationProperty):
  """Writes the Terraform `input_parallelism` block (in-application stream count)."""
  def write(self, w):
    with w.block("input_parallelism"):
      self.property(w, "Count", "count", BasicValueConverter())
class AWS_KinesisAnalytics_Application_InputLambdaProcessor(CloudFormationProperty):
  """Writes the Terraform `input_lambda_processor` block (ResourceARN/RoleARN)."""
  def write(self, w):
    with w.block("input_lambda_processor"):
      self.property(w, "ResourceARN", "resource_arn", StringValueConverter())
      self.property(w, "RoleARN", "role_arn", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationOutput_LambdaOutput(CloudFormationProperty):
  """Writes the Terraform `lambda_output` block (ResourceARN/RoleARN)."""
  def write(self, w):
    with w.block("lambda_output"):
      self.property(w, "ResourceARN", "resource_arn", StringValueConverter())
      self.property(w, "RoleARN", "role_arn", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationOutput_Output(CloudFormationProperty):
  """Writes the Terraform `output` block: schema plus one destination (Lambda/Firehose/Streams)."""
  def write(self, w):
    with w.block("output"):
      self.block(w, "DestinationSchema", AWS_KinesisAnalytics_ApplicationOutput_DestinationSchema)
      self.block(w, "LambdaOutput", AWS_KinesisAnalytics_ApplicationOutput_LambdaOutput)
      self.block(w, "KinesisFirehoseOutput", AWS_KinesisAnalytics_ApplicationOutput_KinesisFirehoseOutput)
      self.block(w, "KinesisStreamsOutput", AWS_KinesisAnalytics_ApplicationOutput_KinesisStreamsOutput)
      self.property(w, "Name", "name", StringValueConverter())
class AWS_KinesisAnalytics_Application_InputSchema(CloudFormationProperty):
  """Writes the Terraform `input_schema` block (encoding, columns, format)."""
  def write(self, w):
    with w.block("input_schema"):
      self.property(w, "RecordEncoding", "record_encoding", StringValueConverter())
      self.repeated_block(w, "RecordColumns", AWS_KinesisAnalytics_Application_RecordColumn)
      self.block(w, "RecordFormat", AWS_KinesisAnalytics_Application_RecordFormat)
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_MappingParameters(CloudFormationProperty):
  """Writes the `mapping_parameters` block for a reference data source."""
  def write(self, w):
    with w.block("mapping_parameters"):
      self.block(w, "JSONMappingParameters", AWS_KinesisAnalytics_ApplicationReferenceDataSource_JSONMappingParameters)
      self.block(w, "CSVMappingParameters", AWS_KinesisAnalytics_ApplicationReferenceDataSource_CSVMappingParameters)
class AWS_KinesisAnalytics_Application_InputProcessingConfiguration(CloudFormationProperty):
  """Writes the Terraform `input_processing_configuration` block."""
  def write(self, w):
    with w.block("input_processing_configuration"):
      self.block(w, "InputLambdaProcessor", AWS_KinesisAnalytics_Application_InputLambdaProcessor)
class AWS_KinesisAnalytics_ApplicationOutput(CloudFormationResource):
  """Maps AWS::KinesisAnalytics::ApplicationOutput to aws_kinesis_analytics_application_output."""
  cfn_type = "AWS::KinesisAnalytics::ApplicationOutput"
  tf_type = "aws_kinesis_analytics_application_output" # TODO: Most likely not working
  ref = "arn"
  attrs = {}
  def write(self, w):
    with self.resource_block(w):
      self.property(w, "ApplicationName", "application_name", StringValueConverter())
      self.block(w, "Output", AWS_KinesisAnalytics_ApplicationOutput_Output)
class AWS_KinesisAnalytics_Application_Input(CloudFormationProperty):
  """Writes the Terraform `input` block: name prefix, schema, source and processing config."""
  def write(self, w):
    with w.block("input"):
      self.property(w, "NamePrefix", "name_prefix", StringValueConverter())
      self.block(w, "InputSchema", AWS_KinesisAnalytics_Application_InputSchema)
      self.block(w, "KinesisStreamsInput", AWS_KinesisAnalytics_Application_KinesisStreamsInput)
      self.block(w, "KinesisFirehoseInput", AWS_KinesisAnalytics_Application_KinesisFirehoseInput)
      self.block(w, "InputProcessingConfiguration", AWS_KinesisAnalytics_Application_InputProcessingConfiguration)
      self.block(w, "InputParallelism", AWS_KinesisAnalytics_Application_InputParallelism)
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_RecordFormat(CloudFormationProperty):
  """Auto-generated CFN->TF mapping: record format of a reference data source."""

  def write(self, w):
    with w.block("record_format"):
      self.block(w, "MappingParameters", AWS_KinesisAnalytics_ApplicationReferenceDataSource_MappingParameters)
      self.property(w, "RecordFormatType", "record_format_type", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_ReferenceSchema(CloudFormationProperty):
  """Auto-generated CFN->TF mapping: schema of a reference data source."""

  def write(self, w):
    with w.block("reference_schema"):
      self.property(w, "RecordEncoding", "record_encoding", StringValueConverter())
      self.repeated_block(w, "RecordColumns", AWS_KinesisAnalytics_ApplicationReferenceDataSource_RecordColumn)
      self.block(w, "RecordFormat", AWS_KinesisAnalytics_ApplicationReferenceDataSource_RecordFormat)
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_ReferenceDataSource(CloudFormationProperty):
  """Auto-generated CFN->TF mapping: the reference data source (schema + S3 location)."""

  def write(self, w):
    with w.block("reference_data_source"):
      self.block(w, "ReferenceSchema", AWS_KinesisAnalytics_ApplicationReferenceDataSource_ReferenceSchema)
      self.property(w, "TableName", "table_name", StringValueConverter())
      self.block(w, "S3ReferenceDataSource", AWS_KinesisAnalytics_ApplicationReferenceDataSource_S3ReferenceDataSource)
class AWS_KinesisAnalytics_ApplicationReferenceDataSource(CloudFormationResource):
  """Auto-generated resource: AWS::KinesisAnalytics::ApplicationReferenceDataSource."""
  cfn_type = "AWS::KinesisAnalytics::ApplicationReferenceDataSource"
  tf_type = "aws_kinesis_analytics_application_reference_data_source"  # TODO: Most likely not working
  ref = "arn"
  attrs = {}

  def write(self, w):
    with self.resource_block(w):
      self.property(w, "ApplicationName", "application_name", StringValueConverter())
      self.block(w, "ReferenceDataSource", AWS_KinesisAnalytics_ApplicationReferenceDataSource_ReferenceDataSource)
class AWS_KinesisAnalytics_Application(CloudFormationResource):
  """Auto-generated resource: AWS::KinesisAnalytics::Application -> aws_kinesis_analytics_application."""
  cfn_type = "AWS::KinesisAnalytics::Application"
  tf_type = "aws_kinesis_analytics_application"
  ref = "id"
  attrs = {}  # Additional TF attributes: arn, create_timestamp, last_update_timestamp, status, version

  def write(self, w):
    with self.resource_block(w):
      self.property(w, "ApplicationName", "name", StringValueConverter())
      self.repeated_block(w, "Inputs", AWS_KinesisAnalytics_Application_Input)
      self.property(w, "ApplicationDescription", "description", StringValueConverter())
      self.property(w, "ApplicationCode", "code", StringValueConverter())
| StarcoderdataPython |
import numpy as np
import theano
import theano.tensor as T
import math
import keras
import tensorflow as tf
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation
import h5py
from keras.optimizers import Adamax, Nadam
import sys
from writeNNet import saveNNet
# BUG FIX: ``from matplotlib import plt`` raises ImportError -- matplotlib
# exposes the plotting API as the ``pyplot`` submodule.
import matplotlib.pyplot as plt
######## OPTIONS #########
ver = 4  # Neural network version
hu = 45  # Number of hidden units in each hidden layer in network
saveEvery = 10  # Epoch frequency of saving
totalEpochs = 200  # Total number of training epochs
trainingDataFiles = "../TrainingData/VertCAS_TrainingData_v2_%02d.h5"  # File format for training data
nnetFiles = "../networks/VertCAS_pra%02d_v%d_45HU_%03d.nnet"  # File format for .nnet files
##########################
# CONSTANTS
Rp = 4000
Rv = 575.4
# NOTE(review): SCL1500 / CL1500 are not defined or imported anywhere in this
# file, so the two lines below raise NameError at import time -- presumably
# advisory-index constants from a missing module; confirm and import them.
pra = SCL1500
ra = CL1500
pd = 0
eps = 3
# NOTE(review): advisoryParams is also undefined in this file; this call only
# works if the missing module above provides it.
params = advisoryParams(ra, pd=pd, )
# Get variables from params dict:
w = params['w']
vlo = params['vlo']
alo = params['alo']
ahi = params['ahi']
wr = params['wr']
vlor = params['vlor']
ws = params['ws']
vlos = params['vlos']
eps = params['eps']
pd = params['pd']
# *****************************
v = [-30, -1]
vmin, vmax = v
hInitLower = 0
# traj = getNominalTraj(vmin,vmin,w*alo,0,pd,hInitLower,isLower=True
Hp = 100  # Height of NMAC
G = 32.2  # Gravitational acceleration
# CONSTANTS (duplicated definitions -- HP is never read again below, G is redefined)
HP = 100  # Height of NMAC
G = 32.2  # Graviational acceleration
'''
Represents quadratic boundaries that take the form:
h = coeffs[0] + coeffs[1]*(t-minTau) + 0.5coeffs[2]*(t-minTau)^2
'''
# The previous RA should be given as a command line input
# implement safe regions
# NOTE(review): getSafeable is undefined in this file as well (missing import).
advisory = advisoryParams(ra, pd=pd, eps=eps)
_, _, boundMin, boundMax = getSafeable(advisory, v, worstCase=False)
def safe_region(r):
    """ Argument:
    r varies between 0nmi and 7nmi
    Hp between −4,000ft and 4,000ft
    Rv = between 0kts and 1,000kts
    V and vI between −5,000 and +5,000ft/min
    w is either -1 or 1
    The accelation ao is g/2 where g is the gravitational acceleration
    """
    """return:
    d, the vertical distance between the safe region and the intruder.
    If the intruder is in the safe region, d = 0.
    bound-1,
    bound-2,
    bound-3
    """
    # NOTE(review): returns None implicitly when no branch matches or a
    # satisfiesBounds check fails; callers must handle that.
    minH = boundMin[0].getH_minTau()
    maxH = boundMax[0].getH_minTau()
    maxTau = boundMax[-1].getMaxTau()
    # NOTE(review): ``&`` binds tighter than comparisons, so this parses as the
    # chained comparison ``-Rp <= (r & r) < ...``; for an int r this happens to
    # equal ``-Rp <= r < ...`` but it is almost certainly meant to be ``and``.
    if -Rp <= r & r < -Rp - r * v * np.minimum(0, w * v) / alo:
        # NOTE(review): ``np.sum(r, Rp)`` passes Rp as the *axis* argument and
        # raises for a scalar r -- presumably ``(r + Rp)``, mirroring
        # ``np.subtract(r, Rp)`` in bound_3. Confirm against the source paper.
        bound_1 = alo / 2.0 * np.square(r + Rp) + w * Rv * v * np.sum(r, Rp) - Rv * 2.0 * Hp
        # NOTE(review): satisfiesBounds is undefined in this file.
        if satisfiesBounds(bound_1 + boundMin, maxH, maxTau):
            d = maxH - bound_1
            return d
    elif -Rp - Rv * np.minimum(0, np.dot(w, v)) / alo <= r <= Rp - Rv * np.minimum(0, np.dot(w, v)) / alo:
        # NOTE(review): bound_2 is a *boolean* (result of ``<``), yet it is then
        # added to boundMin and subtracted from maxH -- looks garbled; verify.
        bound_2 = np.dot(w, maxH) < -np.square(np.minimum(0, np.dot(w, maxH))) / 2.0 * alo - Hp
        if satisfiesBounds(bound_2 + boundMin, maxH, maxTau):
            d = maxH - bound_2
            return d
    elif Rp - Rv * np.minimum(0, np.dot(w, v)) / alo < r <= Rp + Rv * np.maximum(0, w * np.subtract(vlo - v)) / alo:
        bound_3 = alo / 2.0 * np.square(r - Rp) + w * Rv * v * np.subtract(r, Rp) - Rv * 2.0 * Hp
        if satisfiesBounds(bound_3 + boundMin, maxH, maxTau):
            d = maxH - bound_3
            return d
# The previous RA should be given as a command line input
# NOTE(review): this whole CLI/loading section is repeated almost verbatim
# further below; one of the two copies is redundant. The h5py file handle is
# also never closed -- consider ``with h5py.File(...) as f:``.
if len(sys.argv) > 1:
    pra = int(sys.argv[1])
print("Loading Data for VertCAS, pra %02d, Network Version %d" % (pra, ver))
f = h5py.File(trainingDataFiles % pra, 'r')
X_train = np.array(f['X'])      # network inputs
Q = np.array(f['y'])            # target Q-values, one column per advisory
means = np.array(f['means'])    # per-input normalisation means
ranges = np.array(f['ranges'])  # per-input normalisation ranges
min_inputs = np.array(f['min_inputs'])
max_inputs = np.array(f['max_inputs'])
N, numOut = Q.shape
print("Setting up Model")
# Asymmetric loss function
lossFactor = 40.0
lambd = 2
def asymMSE(y_true, y_pred):
    """Asymmetric mean-squared-error loss for advisory Q-value networks.

    Errors on the optimal advisory (the argmax of ``y_true``) are penalised
    much more heavily than errors on the other advisories, with the
    asymmetry controlled by the module-level ``lossFactor``; ``numOut`` and
    ``lambd`` are also read from module scope. The error is additionally
    shifted by the safe-region distance so that unsafe predictions cost
    extra.
    """
    # NOTE(review): safe_region() can return None when no branch matches --
    # confirm the fixed argument 20 always hits a branch.
    distance = safe_region(20)
    # train the neural network by penalizing every distance that's greater than 0
    d = (y_true - y_pred) - tf.cast(lambd * tf.keras.backend.minimum(0, distance), dtype=tf.float32)
    maxes = tf.keras.backend.argmax(y_true, axis=-1)
    maxes_onehot = tf.keras.backend.one_hot(maxes, numOut)
    others_onehot = maxes_onehot - 1
    d_opt = d * maxes_onehot   # error on the optimal advisory only
    d_sub = d * others_onehot  # error on the suboptimal advisories only
    # BUG FIX: the exponent operators had been stripped from this file
    # ("d_opt 2" and "d_opt tf.keras.backend.constant(2)" are syntax errors).
    # Restore the standard asymmetric-MSE terms: a = k*(n-1)*(d^2 + |d|),
    # b = d^2, c = k*(d^2 + |d|), d = d^2.
    a = lossFactor * (numOut - 1) * (d_opt ** 2 + keras.backend.abs(d_opt))
    b = d_opt ** 2
    c = lossFactor * (d_sub ** 2 + keras.backend.abs(d_sub))
    d = d_sub ** 2
    loss = tf.keras.backend.switch(d_sub > 0, c, d) + tf.keras.backend.switch(d_opt > 0, a, b)
    return loss
# NOTE(review): this repeats the CLI-parsing/data-loading section above
# (lossFactor is also re-assigned to the same value); the duplication should
# be collapsed into a single load.
if len(sys.argv) > 1:
    pra = int(float(sys.argv[1]))
print("Loading Data for VertCAS, pra %02d, Network Version %d" % (pra, ver))
f = h5py.File(trainingDataFiles % pra, 'r')
X_train = np.array(f['X'])
Q = np.array(f['y'])
means = np.array(f['means'])
ranges = np.array(f['ranges'])
min_inputs = np.array(f['min_inputs'])
max_inputs = np.array(f['max_inputs'])
N, numOut = Q.shape
print("Setting up Model")
# Asymmetric loss function
lossFactor = 40.0
# Define model architecture: 6 hidden ReLU layers of ``hu`` units, linear output.
model = Sequential()
model.add(Dense(hu, activation='relu', input_dim=4))
model.add(Dense(hu, activation='relu'))
model.add(Dense(hu, activation='relu'))
model.add(Dense(hu, activation='relu'))
model.add(Dense(hu, activation='relu'))
model.add(Dense(hu, activation='relu'))
model.add(Dense(numOut))
# NOTE(review): ``lr=`` is deprecated in newer Keras (use learning_rate=).
opt = Nadam(lr=0.0003)
model.compile(loss=asymMSE, optimizer=opt, metrics=['accuracy'])
# Train and write nnet files every ``saveEvery`` epochs.
epoch = saveEvery
while epoch <= totalEpochs:
    model.fit(X_train, Q, epochs=saveEvery, batch_size=2 ** 8, shuffle=True)
    saveFile = nnetFiles % (pra, ver, epoch)
    saveNNet(model, saveFile, means, ranges, min_inputs, max_inputs)
    epoch += saveEvery
# NOTE(review): plt.scatter() with no arguments raises TypeError -- this looks
# like a leftover and should either be removed or given data to plot.
plt.scatter()
| StarcoderdataPython |
4814846 | <gh_stars>0
from django.db import models
from users.models import User
class Empresa(models.Model):
    """A company/organisation a contact can belong to."""
    # Choices for ``tipo`` (element classification used by this project).
    AGUA = 'AG'
    FUEGO = 'FG'
    TIERRA = 'TR'
    AIRE = 'AR'
    TIPO = (
        (AGUA, 'agua'),
        (FUEGO, 'fuego'),
        (TIERRA, 'tierra'),
        (AIRE, 'aire'),
    )
    nombre = models.CharField(max_length=60, default='')
    email = models.EmailField(default='', blank=True, null=True)
    giro = models.CharField(max_length=30, default='')
    tipo = models.CharField(max_length=20, default='', choices=TIPO)
    direccion = models.CharField(max_length=254, blank=True, null=True)
    web = models.URLField(default='', blank=True, null=True)
    nota = models.TextField(blank=True, null=True)

    def __str__(self):
        return '%s' % self.nombre
class Contacto(models.Model):
    """A person, optionally linked to one or more companies (Empresa)."""
    # Honorific choices for ``tratamiento``.
    SEÑOR = 'SR'
    SEÑORA = 'SRA'
    LICENCIADO = 'LIC'
    INGENIERO = 'ING'
    MAESTRO = 'MTR'
    DOCTOR = 'DR'
    TRATAMIENTOS = (
        (SEÑOR, 'Sr.'),
        (SEÑORA, 'Sra.'),
        (LICENCIADO, 'Lic.'),
        (INGENIERO, 'Ing.'),
        (MAESTRO, 'Mtr.'),
        (DOCTOR, 'Dr.'),
    )
    # NOTE(review): Django >= 2.0 requires an explicit on_delete argument here
    # (e.g. on_delete=models.CASCADE); as written this only works on Django 1.x.
    creado_por = models.ForeignKey(User)
    tratamiento = models.CharField(max_length=3, choices=TRATAMIENTOS, default=SEÑOR)
    nombre = models.CharField(max_length=90, default='')
    email = models.EmailField(default='', blank=True, null=True)
    empresa = models.ManyToManyField(Empresa)
    cargo = models.CharField(max_length=48, default='', blank=True, )
    web_personal = models.URLField(default='', blank=True, null=True)
    nota = models.TextField(default='', blank=True, null=True)
    imagen = models.ImageField(upload_to='contacts', blank=True, null=True)
    fecha_de_creacion = models.DateTimeField(editable=False, auto_now=True, null=True)

    def __str__(self):
        return '%s' % self.nombre
| StarcoderdataPython |
# Minimal pygame demo: arrow keys move an (invisible) x/y cursor that is only
# printed to stdout; nothing is ever drawn to the screen surface.
import pygame
import sys
pygame.init()
# NOTE(review): the window is created twice with different sizes -- this first
# 640x480 call is immediately superseded by the 640x280 one below.
screen = pygame.display.set_mode((640, 480))
clock = pygame.time.Clock()
x = 0
y = 0
# use a (r, g, b) tuple for color
yellow = (255, 255, 0)
# create the basic window/screen and a title/caption
# default is a black background
screen = pygame.display.set_mode((640, 280))
pygame.display.set_caption("Text adventures with Pygame")
# pick a font you have and set its size
# NOTE(review): ``myfont`` is never used.
myfont = pygame.font.SysFont(None, 30)
pygame.display.set_caption('Animation')
while 1:
    clock.tick(30)  # cap at 30 FPS
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
    key = pygame.key.get_pressed()
    if key[pygame.K_UP]:
        y += 1
        print(y)
    elif key[pygame.K_DOWN]:
        y -= 1
        print(y)
    elif key[pygame.K_RIGHT]:
        x += 1
        print(x)
    elif key[pygame.K_LEFT]:
        x -= 1
        print(x)
    pygame.display.flip()
# NOTE(review): unreachable -- the loop only exits via sys.exit(), so
# pygame.quit() is never called.
pygame.quit()
3332349 | from .diffusion import diffusion_gaussian, advection_diffusion_gaussian_2d
| StarcoderdataPython |
1711422 | from flask_restful import Resource, reqparse, request
from lib.objects.namespace import Namespace
from lib.objects.lock import Lock
class LockController(Resource):
    """REST resource for creating/refreshing, reading and deleting locks.

    Routes are expected to carry ``namespace_id`` and ``lock_id`` path
    parameters. Mutating verbs (PUT/DELETE) require the namespace auth token
    in the ``X-Getlock-Auth`` request header.
    """
    # TODO Check access as separate method or decorator
    # https://flask-restful.readthedocs.io/en/latest/extending.html#resource-method-decorators
    parser = reqparse.RequestParser()
    parser.add_argument(
        "ttl", type=int, default=60, help="Time for lock to live without refreshes"
    )

    def __init__(self, storage):
        self.storage = storage

    # --- shared lookup/validation helpers (same responses as the old inline code) ---

    def _resolve_namespace(self, namespace_id: str):
        """Return ``(namespace, None)`` or ``(None, error_response)``."""
        namespace = Namespace(storage=self.storage, id=namespace_id)
        if not namespace.validate_id():
            return None, ({"message": "Wrong namespace"}, 400)
        if not namespace.read():
            return None, ({"message": "Namespace not found", "lock": None}, 404)
        return namespace, None

    @staticmethod
    def _check_auth(namespace):
        """Return an error response when the request token is wrong, else None."""
        token = request.headers.get("X-Getlock-Auth")
        if token != namespace.token:
            return {"message": "Provided wrong auth token"}, 403
        return None

    def _resolve_lock(self, lock_id: str, namespace):
        """Return ``(lock, exists, None)`` or ``(None, None, error_response)``."""
        lock = Lock(storage=self.storage, id=lock_id, namespace=namespace)
        if not lock.validate_id():
            return None, None, ({"message": "Wrong lock", "lock": None}, 400)
        return lock, lock.read(), None

    def put(self, namespace_id: str, lock_id: str):
        """Create the lock, or refresh/update it if it already exists."""
        namespace, error = self._resolve_namespace(namespace_id)
        if error:
            return error
        error = self._check_auth(namespace)
        if error:
            return error
        args = self.parser.parse_args(strict=True)
        lock, exists, error = self._resolve_lock(lock_id, namespace)
        if error:
            return error
        if not exists:
            message = "Lock created"
            lock._load(**args)
            lock.create()
        else:
            message = "Lock updated"
            lock._load_self()
            lock._load(**args)
            lock.update()
        return {"message": message, "lock": lock._dump()}, 201

    def get(self, namespace_id: str, lock_id: str):
        """Read a lock; 410 when it exists but its TTL has expired."""
        namespace, error = self._resolve_namespace(namespace_id)
        if error:
            return error
        lock, exists, error = self._resolve_lock(lock_id, namespace)
        if error:
            return error
        if not exists:
            return {"message": "Lock not found", "lock": None}, 404
        lock._load_self()
        if lock.expired:
            return {"message": "Lock has expired", "lock": lock._dump()}, 410
        return {"message": "Lock found", "lock": lock._dump()}, 200

    def delete(self, namespace_id: str, lock_id: str):
        """Delete a lock (requires the namespace auth token)."""
        namespace, error = self._resolve_namespace(namespace_id)
        if error:
            return error
        error = self._check_auth(namespace)
        if error:
            return error
        lock, exists, error = self._resolve_lock(lock_id, namespace)
        if error:
            return error
        if not exists:
            return {"message": "Lock not found", "lock": None}, 404
        lock.delete()
        return {"message": "Lock removed", "lock": lock._dump()}, 200
| StarcoderdataPython |
187187 | # Object Detector
# Developed by <NAME> : November 2018
#
# Developped on : Python 3.6.5.final.0 (Conda 4.5.11), OpenCV 3.4.1, Numpy 1.14.3
# The programs first extracts the circles (Hough Transform) on each frame,
# then compares each circle with the object using the SIFT detector.
# Execute as follows : detect.py -i positive.avi -o export.csv
import cv2
import numpy as np
import csv
import sys
# Extract circle array from an image
def image_circles(image):
    """Detect circles in a BGR image with the Hough transform.

    Returns the raw cv2.HoughCircles result, or None when nothing is found.
    """
    blurred = cv2.medianBlur(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), 5)
    min_dist = blurred.shape[0] / 8  # minimum spacing between circle centres
    return cv2.HoughCircles(blurred, cv2.HOUGH_GRADIENT, 1, min_dist,
                            param1=70, param2=30,
                            minRadius=1, maxRadius=100)
# draw circles from circle array on an image
def draw_circles(image, circles):
    """Draw every circle of a HoughCircles result onto ``image`` (in place)."""
    if circles is None:
        return
    for cx, cy, radius in np.uint16(np.around(circles))[0, :]:
        cv2.circle(image, (cx, cy), 1, (0, 100, 100), 3)       # centre dot
        cv2.circle(image, (cx, cy), radius, (255, 0, 255), 3)  # outline
# draw one circle from propreties array [x,y,radius] on an image
def draw_circle(image, circle):
    """Draw one circle given as ``[x, y, radius]`` onto ``image`` (in place)."""
    if circle is None:
        return
    cx, cy, radius = np.uint16(np.around(circle))
    cv2.circle(image, (cx, cy), 1, (0, 100, 100), 3)       # centre dot
    cv2.circle(image, (cx, cy), radius, (255, 0, 255), 3)  # outline
# draw a bounding box using the circle properties
def draw_box(image, circle):
    """Draw the axis-aligned bounding box of ``[x, y, radius]`` onto ``image``."""
    if circle is None:
        return
    xc, yc, radius = np.uint16(np.around(circle))
    top_left = (xc - radius, yc - radius)
    bottom_right = (xc + radius, yc + radius)
    cv2.rectangle(image, top_left, bottom_right, (0, 255, 0), 3)
# crop image and avoid overpassing the limits
def image_crop(image, y, x, r):
    """Crop a (2r x 2r) window centred at the circle centre, clamped to the image.

    Note the deliberate parameter order: callers pass the circle centre as
    ``(cx, cy)``, so here ``y`` indexes columns and ``x`` indexes rows.
    """
    row_lo = max(x - r, 0)
    row_hi = min(x + r, image.shape[0])
    col_lo = max(y - r, 0)
    col_hi = min(y + r, image.shape[1])
    return image[row_lo:row_hi, col_lo:col_hi]
# return the number of matches between the keypoints of an image and the keypoints entered
def matches_number(sift, img, kp1, des1):
    """Count good SIFT matches between ``img`` and the reference image.

    ``kp1``/``des1`` are the keypoints and descriptors of the positive
    (reference) image; ``img`` is the candidate crop. Returns the number of
    matches passing Lowe's ratio test, or None when either side has too few
    keypoints/descriptors for FLANN's 2-nearest-neighbour match.
    """
    # find the keypoints and descriptors with SIFT
    kp2, des2 = sift.detectAndCompute(img, None)
    # If there are no keypoints and descriptors
    if not kp1 or not kp2:
        return None
    if len(kp1) <= 2 or len(kp2) <= 2:
        return None
    # BUG FIX: the original tested ``len(des1 >= 2)`` -- the length of an
    # element-wise boolean array, which is always truthy here -- instead of
    # the intended descriptor-count check needed for knnMatch with k=2.
    if len(des1) < 2 or len(des2) < 2:
        return None
    flann_index_kdtree = 0
    index_params = dict(algorithm=flann_index_kdtree, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    # store all the good matches as per Lowe's ratio test.
    distance_min = 0.65
    good = [m for m, n in matches if m.distance < distance_min * n.distance]
    return len(good)
# initialize csv file and erase old content
def csv_initialize(file):
with open(file, mode='w') as csv_file:
csv.writer(csv_file, delimiter=',', lineterminator='\n', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# add a row at the end of the csv file
def csv_addrow(file, circle, frameid):
circle = np.uint16(np.around(circle))
i = circle
xc, yc = (i[0], i[1])
radius = i[2]
with open(file, mode='a') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',', lineterminator='\n', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow([frameid, xc - radius, yc - radius, 2 * radius, 2 * radius])
# Interface management
def interface():
if not len(sys.argv) == 5 or not sys.argv[1] == "-i" or not sys.argv[3] == "-o":
raise Exception("Interface Error ! Use the following format : detector.py -i positive.avi -o export.csv")
return str(sys.argv[2]), str(sys.argv[4])
# ***********
# MAIN PROGRAM
# ***********
# ***********
video_file, export_file = interface()
cam = cv2.VideoCapture(video_file)
positive = cv2.imread('positive.png')
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(positive, None)  # keypoints/descriptors of the positive image
frameId = 0  # the current frame
csv_initialize(export_file)
# Parameters
threshold_value = 70   # binarisation threshold before circle detection
NB_matches_min = 7     # minimum SIFT matches to accept a circle as the object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# NOTE(review): the writer assumes 640x480 frames; confirm the input video size.
out = cv2.VideoWriter('output.avi', fourcc, 30.0, (640, 480))
# Loop of the video frames
while True:
    frameId = frameId + 1
    # NOTE(review): ``ret`` is never checked -- at end of video ``image`` is
    # None and cv2.threshold below will raise.
    ret, image = cam.read()
    # Thresholding image
    retval, image_r = cv2.threshold(image, threshold_value, 255, cv2.THRESH_BINARY)
    circles = image_circles(image_r)  # Play on : image or image_r : to activate or disable thresholding
    # if we have circles in frame
    # NOTE(review): image_circles may return None -- confirm
    # np.count_nonzero(None) really guards the indexing below.
    if np.count_nonzero(circles) != 0:
        # Loop on the different circles
        for circle in circles[0, :]:
            x, y, r = circle.astype(int)
            crop_img = image_crop(image, x, y, r)
            NB_matches = matches_number(sift, crop_img, kp1, des1)
            print("number of matches :", NB_matches)
            if NB_matches is not None:
                # if we have enough matches draw the box and add the coordinates to the export file
                if NB_matches > NB_matches_min:
                    draw_box(image, circle)
                    csv_addrow(export_file, circle, frameId)
    # write the flipped frame
    out.write(image)
    # draw_circles(image,circles) #to draw all circles given by hough transform
    cv2.imshow('result', image)
    if cv2.waitKey(10) == ord('q'):
        break
# NOTE(review): ``out.release()`` is missing -- the output video may not be
# finalised properly.
cam.release()
cv2.destroyAllWindows()
167893 | <reponame>HenryKenlay/grapht
from grapht.graphtools import has_isolated_nodes
from grapht.perturb import *
from grapht.sampling import sample_edges
import networkx as nx
def test_khop_remove():
    """khop_remove must delete exactly r existing edges near the chosen node."""
    G = nx.barabasi_albert_graph(500, 2)
    r = 5
    for k in range(1, 5):
        Gp, edge_info, node = khop_remove(G, k, r)
        # check only edges were deleted and exactly r were deleted
        assert set(Gp.edges()).issubset(set(G.edges()))
        assert len(G.edges()) - len(Gp.edges()) == r
        assert edge_info['type'].unique()[0] == 'remove'
        assert len(edge_info['type'].unique()) == 1
        # make sure edges were from a k-hop neighbourhood
        # NOTE(review): the bound below is ``r`` (=5), not ``k`` -- for k < 5
        # this is weaker than the comment claims; should this be ``<= k``?
        for u in edge_info['u']:
            assert nx.dijkstra_path_length(G, u, node) <= r
        for v in edge_info['v']:
            assert nx.dijkstra_path_length(G, v, node) <= r
        # The enforce_* flags must guarantee the corresponding invariants.
        Gp, _, _ = khop_remove(G, k, r, enforce_connected=True, enforce_no_isolates=True)
        assert nx.is_connected(Gp) and not has_isolated_nodes(Gp)
        Gp, _, _ = khop_remove(G, k, r, enforce_connected=True, enforce_no_isolates=False)
        assert nx.is_connected(Gp)
        Gp, _, _ = khop_remove(G, k, r, enforce_connected=False, enforce_no_isolates=True)
        assert not has_isolated_nodes(Gp)
def test_khop_rewire():
    """khop_rewire should report r removals plus r additions (two edit types)."""
    graph = nx.barabasi_albert_graph(100, 3)
    hops, rewires = 3, 3
    solution, edge_info, node = khop_rewire(graph, hops, rewires)
    assert len(edge_info) == 2 * rewires
    assert len(edge_info['type'].unique()) == 2
def test_rewire():
    """Rewiring conserves edge stubs: removed endpoints equal added endpoints."""
    graph = nx.barabasi_albert_graph(100, 3)
    removed_stubs = set()
    added_stubs = set()
    for _, row in rewire(graph, sample_edges(graph, 3)).iterrows():
        if row['type'] == 'remove':
            removed_stubs.update((row['u'], row['v']))
        elif row['type'] == 'add':
            added_stubs.update((row['u'], row['v']))
    assert removed_stubs == added_stubs
3295301 | # Plotting module for neural analysis package
# Plot speed profile
def speed_profile(df):
import numpy as np
import plottools as pt
import matplotlib as mpl
mpl.use('PDF')
import matplotlib.pyplot as plt
import copy
# Create figure
ax_hndl = pt.create_subplot(1, 1)
# Iterate over trials and calculate speed
n_trials = df.shape[0]
s_all = []
for i in range(n_trials):
# Get time, speed
t = df['time'][i]
s = df['speed'][i]
# Align time so t=0 occurs at movement onset
onset_idx = df['onset_idx'][i]
t = t - t[onset_idx]
# Plot trajectory
plt.plot(t, s, 'k')
plt.plot(t[onset_idx], s[onset_idx], 'ko')
# Format figure
plt.xlim([-500, 1000])
plt.xlabel('Time (ms)')
plt.ylabel('Speed')
plt.suptitle('Reach speed')
# Save figure
fig_name = 'SpeedProfile'
plt.savefig('results/{}.pdf'.format(fig_name))
return None
# Plot reach trajectories
def reach_trajectories(df):
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
# Get unique targets
tc = df['targetCode'].values
n_trials = len(tc)
tc_unique = np.unique(tc)
# Setup figure
dpi = 100
ax_size = [400, 300] # axis size (pixels)
fig_size = [600, 600] # figure size (pixels)
fig_size = tuple(np.array(fig_size)/dpi)
fh = plt.figure(figsize=fig_size, dpi=dpi)
ax = fh.add_axes([0.15, 0.15, 0.7, 0.7])
# Get plotting colors
cmap = matplotlib.cm.get_cmap('hsv')
n_targ = len(tc_unique)
targ_col = cmap(np.linspace(0, 1, n_targ+1))
# Iterate over unique target codes and plot targets
patches = []
for t in tc_unique:
# Get target
tc_mask = np.in1d(tc, t)
pos = df['targetPosition'].values[tc_mask][0]
target_radius = df['targetRadius'].values[tc_mask][0]
# Plot target
targ_col = cmap(t/(n_targ+1))
circle = matplotlib.patches.Rectangle(pos - target_radius, target_radius*2, target_radius*2,
facecolor=targ_col,
linewidth=0.5,
edgecolor=[0, 0, 0],
alpha=0.5)
ax.add_patch(circle)
# Iterate over all trials and plot trajectories
for t in range(n_trials):
pos = df.pos[t]
tc = df.targetCode[t]
targ_col = cmap(tc / (n_targ + 1))
plt.plot(pos[0], pos[1], color=targ_col, linewidth=0.5)
plt.plot(pos[0, -1], pos[1, -1], 'ko', markersize=2)
# Format plot and save
ax_lim = (-200, 200)
ticks = [-100, 0, 100]
ax.set_xlim(ax_lim)
ax.set_ylim(ax_lim)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
plt.xlabel('X position')
plt.ylabel('Y position')
plt.suptitle('Memory-guided reach trajectories')
fig_name = 'ReachTrajectories'
save_dir = '/Users/alandegenhart/Documents/GitHub/python/results'
plt.savefig('results/{}.pdf'.format(fig_name))
plt.close(fh)
return None
| StarcoderdataPython |
34630 | <reponame>HK3-Lab-Team/pytrousse<filename>scripts/use_dataframe_with_info.py
import os
import time
from trousse.dataset import Dataset
# Load the anonymised veterinary dataset and time a column-type listing.
df_sani_dir = os.path.join(
    "/home/lorenzo-hk3lab/WorkspaceHK3Lab/",
    "smvet",
    "data",
    "Sani_15300_anonym.csv",
)
# Columns to treat as metadata rather than features.
metadata_cols = (
    "GROUPS TAG DATA_SCHEDA NOME ID_SCHEDA COMUNE PROV MONTH YEAR BREED"
    " SEX AGE SEXUAL STATUS BODYWEIGHT PULSE RATE RESPIRATORY RATE TEMP "
    "BLOOD PRESS MAX BLOOD PRESS MIN BLOOD PRESS MEAN BODY CONDITION SCORE "
    "HT H DEATH TIME OF DEATH PROFILO_PAZIENTE ANAMNESI_AMBIENTALE"
    " ANAMNESI_ALIMENTARE VACCINAZIONI FILARIOSI GC_SEQ"
)
# NOTE(review): the literal above is space-separated, so replace("\t", ",")
# is a no-op and the tuple ends up with a single giant element -- presumably
# the original string was tab-separated. Confirm the intended delimiter.
metadata_cols = tuple(metadata_cols.replace("\t", ",").split(","))
df_sani = Dataset(metadata_cols=metadata_cols, data_file=df_sani_dir)
time0 = time.time()
print(df_sani.column_list_by_type)
print(time.time() - time0)
# Cleanup maps applied to cell values: whole-cell replacements first, then
# per-character substitutions.
whole_word_replace_dict = {
    "---": None,
    ".": None,
    "ASSENTI": "0",
    "non disponibile": None,
    "NV": None,
    "-": None,
    "Error": None,
    # '0%': '0'
}
char_replace_dict = {"°": "", ",": "."}
178471 | <gh_stars>1-10
#!/usr/bin/python
# encoding: utf-8
"""
@author: xuk1
@license: (C) Copyright 2013-2017
@contact: <EMAIL>
@file: test.py
@time: 8/15/2017 10:38
@desc:
"""
| StarcoderdataPython |
148485 | <filename>src/elastic/azext_elastic/tests/latest/test_elastic_scenario.py
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import os
from azure.cli.testsdk import ScenarioTest
from azure.cli.testsdk import ResourceGroupPreparer
from .example_steps import step_monitor_create
from .example_steps import step_monitor_show
from .example_steps import step_monitor_list
from .example_steps import step_monitor_list2
from .example_steps import step_monitor_update
from .example_steps import step_deployment_info_list
from .example_steps import step_monitored_resource_list
from .example_steps import step_tag_rule_create
from .example_steps import step_tag_rule_show
from .example_steps import step_tag_rule_list
from .example_steps import step_tag_rule_delete
from .example_steps import step_vm_collection_update
from .example_steps import step_vm_host_list
from .example_steps import step_vm_ingestion_detail
from .example_steps import step_monitor_delete
from .. import (
try_manual,
raise_if,
calc_coverage
)
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
# Env setup_scenario
@try_manual
def setup_scenario(test):
    """Auto-generated environment setup hook (intentionally empty)."""
    pass
# Env cleanup_scenario
@try_manual
def cleanup_scenario(test):
    """Auto-generated environment cleanup hook (intentionally empty)."""
    pass
# Testcase: Scenario
@try_manual
def call_scenario(test):
    """Auto-generated end-to-end walk of the Elastic monitor lifecycle:
    create/show/list/update a monitor, inspect deployments and resources,
    exercise tag rules and VM endpoints, then delete the monitor."""
    setup_scenario(test)
    step_monitor_create(test, checks=[
        test.check("name", "{myMonitor}", case_sensitive=False),
        test.check("location", "westus2", case_sensitive=False),
        test.check("sku.name", "ess-monthly-consumption_Monthly", case_sensitive=False),
        test.check("tags.Environment", "Dev", case_sensitive=False),
    ])
    step_monitor_show(test, checks=[
        test.check("name", "{myMonitor}", case_sensitive=False),
        test.check("location", "westus2", case_sensitive=False),
        test.check("tags.Environment", "Dev", case_sensitive=False),
    ])
    step_monitor_list(test, checks=[
        test.check('length(@)', 37),
    ])
    step_monitor_list2(test, checks=[
        test.check('length(@)', 1),
    ])
    step_monitor_update(test, checks=[
        test.check("name", "{myMonitor}", case_sensitive=False),
        test.check("location", "westus2", case_sensitive=False),
        test.check("sku.name", "ess-monthly-consumption_Monthly", case_sensitive=False),
        test.check("tags.Environment", "Dev", case_sensitive=False),
    ])
    step_deployment_info_list(test, checks=[])
    step_monitored_resource_list(test, checks=[])
    step_tag_rule_create(test, checks=[])
    step_tag_rule_show(test, checks=[])
    step_tag_rule_list(test, checks=[])
    # Error (ResourceDeletionFailed) Resource deletion failed as RP returned status code: 'UnprocessableEntity'
    # step_tag_rule_delete(test, checks=[])
    step_vm_collection_update(test, checks=[])
    step_vm_host_list(test, checks=[])
    step_vm_ingestion_detail(test, checks=[])
    step_monitor_delete(test, checks=[])
    cleanup_scenario(test)
# Test class for Scenario
@try_manual
class ElasticScenarioTest(ScenarioTest):
    """Auto-generated azure-cli scenario test wrapper around call_scenario."""

    def __init__(self, *args, **kwargs):
        super(ElasticScenarioTest, self).__init__(*args, **kwargs)
        self.kwargs.update({
            'subscription_id': self.get_subscription_id()
        })
        self.kwargs.update({
            'myMonitor': 'myMonitor',
        })

    @ResourceGroupPreparer(name_prefix='clitestelastic_myResourceGroup'[:7], key='rg', parameter_name='rg')
    def test_elastic_Scenario(self, rg):
        call_scenario(self)
# Module-level bookkeeping for the generated test harness: record example
# coverage, then re-raise any exception swallowed by @try_manual.
calc_coverage(__file__)
raise_if()
| StarcoderdataPython |
1648216 | import heapq
from abc import ABC, abstractmethod
class Queue(ABC):
    """Abstract base for the concrete queue types.

    Items live in ``self.list_elements``; subclasses decide which end
    ``pop`` removes from.
    """

    def __init__(self):
        self.list_elements = []

    def empty(self) -> bool:
        """Return True when no items are queued."""
        return not self.list_elements

    def insert(self, item) -> None:
        """Add ``item`` to the queue."""
        self.list_elements.append(item)

    @abstractmethod
    def pop(self):
        """Remove and return an element from the queue."""
class FIFOQueue(Queue):
    """First-in, first-out queue: ``pop`` returns the oldest item."""

    def __init__(self):
        super().__init__()

    def pop(self):
        """Remove and return the oldest element, or None when empty."""
        if not self.list_elements:
            return None
        return self.list_elements.pop(0)
class LIFOQueue(Queue):
    """Last-in, first-out queue (a stack): ``pop`` returns the newest item."""

    def __init__(self):
        super().__init__()

    def pop(self):
        """Remove and return the most recently inserted element, or None when empty."""
        if not self.list_elements:
            return None
        return self.list_elements.pop()
class PriorityQueue:
    """Min-priority queue of (item, priority) pairs built on ``heapq``.

    Entries are pushed as ``(priority * 10000, insertion_counter, item)``;
    the monotonically increasing counter breaks ties so equal-priority items
    come out FIFO and the heap never has to compare the items themselves.
    """

    def __init__(self):
        self.list_elements = []
        self.count = 0

    def empty(self) -> bool:
        """Return True when the queue holds no items."""
        return not self.list_elements

    def insert(self, item, priority) -> None:
        """Add ``item`` with ``priority`` (lower priorities pop first).

        :param item: the element to be put in the queue
        :param priority: sorting key, often the value of some cost function
        """
        self.count += 1
        entry = (priority * 10000, self.count, item)
        heapq.heappush(self.list_elements, entry)

    def pop(self):
        """Remove and return the lowest-priority item, or None when empty."""
        if not self.list_elements:
            return None
        _, _, item = heapq.heappop(self.list_elements)
        return item
| StarcoderdataPython |
121056 | ######################################################
# A watch (as in a small clock for your wrist or pocket)
#
# Button A sets the mode: Clock or Setting time
# Button B
# in clock mode: shows the time as a scrolling display
# in setting mode: increments the time
#
# The LED array displays the clock time in the format hh:mm.
# The digits of the time are represented by columns of LEDs.
#
# The digits 1 - 5 are represented by more LEDs being lit from
# the bottom up.
#
# For instance the digit 3 would look like:
#
# .
# .
# X
# X
# X
#
#
# The digits 6 - 9 are represented by LEDs being turned off from
# the bottom up. The digit 6 would look like:
#
# X
# X
# X
# X
# .
#
# The centre column is a colon flashing once a second to separate hours from minutes.
#
# The time 17:49 would look like:
#
# . X . . X
# . X . X .
# . X . X .
# . . . X .
# X . . X .
#
#
######################################################
from microbit import *
# Tweak CLOCK_ADJUST to make your system clock more accurate.
# My clock is too fast by 4 seconds every minute so I use 4/60.
# If your clock is too slow by 3 seconds every minute use -3/60.
CLOCK_ADJUST = 4/60

# Edge-detection state for the two buttons and the 1 Hz display refresh.
last_button_a_state = False
last_button_b_state = False
last_display_time = 0
base_time = 0  # adjusted-system-time reference for "midnight"; shifted by the setting buttons
mode = 0       # current UI mode, an index into ``modes``
modes = {0:"clock", 1:"set h", 2:"mx10", 3:"m"}
def decode_time(milliseconds):
    """Convert a time in milliseconds into an "hh:mm" string (24-hour clock)."""
    minutes = int(milliseconds / (1000 * 60) % 60)
    hours = int(milliseconds / (1000 * 60 * 60) % 24)
    return f"{hours:02}:{minutes:02}"
def show_time(time):
    """Render hh:mm on the 5x5 LED grid, one digit per outer column.

    Column 2 is the colon, which blinks at 1 Hz but only while in clock
    mode (``mode == 0``); in setting modes it stays off.
    """
    time_string = decode_time(time)
    for i in range(5):
        if time_string[i].isdigit():
            d = int(time_string[i])
            plot_LED_column(i, d)
    show_colon(mode==0 and int((time / 1000) % 2))
def show_colon(visible):
    """Light (truthy ``visible``) or clear the two colon pixels in column 2."""
    display.set_pixel(2, 1, visible*9)
    display.set_pixel(2, 3, visible*9)
def get_clock_time():
    """Return milliseconds since "midnight", wrapping every 24 hours.

    Scales the system clock by CLOCK_ADJUST to compensate for drift, and
    re-anchors the global ``base_time`` at the most recent midnight so the
    time-setting buttons can shift it.
    """
    global base_time
    sys_time = running_time() / (1 + CLOCK_ADJUST)
    time = (sys_time - base_time) % (24 * 60 * 60 * 1000)
    base_time = sys_time - time
    return time
def plot_LED_column(column, number):
    """plots a column of LEDs to represent a number from 0 - 9"""
    # Values above 9 are clamped so a column never shows an impossible digit.
    if number > 9:
        number = 9
    # 0-5: light `number` pixels from the bottom of the column upwards.
    if number <= 5:
        for i in range(4, -1, -1):
            if i < 5 - number:
                display.set_pixel(column, i, 0)
            else:
                display.set_pixel(column, i, 9)
    # 6-9: inverted scheme - start fully lit and turn off (number - 5)
    # pixels from the bottom (see the header comment's examples).
    if number > 5:
        for i in range(4, -1, -1):
            if i < 5 - (number - 5):
                display.set_pixel(column, i, 9)
            else:
                display.set_pixel(column, i, 0)
while True:
    # detect a change in button A's state, the Mode button
    button_a_state = button_a.is_pressed()
    if button_a_state != last_button_a_state:
        last_button_a_state = button_a_state
        #increment the mode
        if button_a_state == True:
            mode = (mode + 1) % 4
            display.scroll(modes[mode])
            show_time(get_clock_time())
    # detect a change in button B's state, the increment / select button
    button_b_state = button_b.is_pressed()
    if button_b_state != last_button_b_state:
        last_button_b_state = button_b_state
        if button_b_state == True:
            # button B's action depends on the current mode
            if mode == 0: #show time
                display.scroll(decode_time(get_clock_time()))
            elif mode == 1: #setting time: increment hour units
                # Moving base_time backwards makes the derived clock time later.
                base_time = base_time - (60 * 60 * 1000)
            elif mode == 2: #setting time: increment minute tens
                base_time = base_time - (10 * 60 * 1000)
            elif mode == 3: #setting time: increment minute units
                base_time = base_time - (60 * 1000)
            show_time(get_clock_time())
    #If in clock mode update the display every second
    if mode == 0:
        display_time = running_time() - last_display_time
        if display_time >= 1000:
            # NOTE(review): this stores the elapsed delta rather than the
            # current timestamp - presumably running_time() was intended;
            # confirm before changing.
            last_display_time = display_time
            show_time(get_clock_time())
    sleep(100)
| StarcoderdataPython |
3221489 | <gh_stars>10-100
# Copyright (c) 2017-2019 <NAME>
#
# SPDX-License-Identifier: BSD-3-Clause
# The BSD-3-Clause license for this file can be found in the LICENSE file included with this distribution
# or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText
from struct import pack, unpack_from
from hashlib import sha256
from .misc import modulus_fmt
from .header import SegTag, Header
from .commands import EnumAlgorithm
class SecretKeyBlob(object):
    """ Secret Key Blob

    Serialized layout: 4 header bytes (mode, algorithm, size, flag)
    followed by the wrapped key data. The `size` byte counts the header
    bytes as well as the payload.
    """

    @property
    def mode(self):
        return self._mode

    @mode.setter
    def mode(self, value):
        self._mode = value

    @property
    def algorithm(self):
        return self._alg

    @algorithm.setter
    def algorithm(self, value):
        self._alg = value

    @property
    def flag(self):
        return self._flg

    @flag.setter
    def flag(self, value):
        self._flg = value

    @property
    def blob(self):
        # Raw wrapped-key payload (bytes/bytearray).
        return self._data

    @blob.setter
    def blob(self, value):
        assert isinstance(value, (bytes, bytearray))
        self._data = value

    @property
    def size(self):
        # Serialized size: payload plus the 4-byte header.
        return len(self._data) + 4

    def __init__(self, mode, algorithm, flag):
        """Create an empty blob with the given mode, algorithm and flag bytes."""
        self._mode = mode
        self._alg = algorithm
        self._flg = flag
        self._data = bytearray()

    def __repr__(self):
        return "SecKeyBlob <Mode: {}, Algo: {}, Flag: 0x{:02X}, Size: {}>".format(
            self.mode, self.algorithm, self.flag, len(self._data))

    def __eq__(self, obj):
        return (isinstance(obj, SecretKeyBlob)
                and self.mode == obj.mode
                and self.algorithm == obj.algorithm
                and self.flag == obj.flag
                and self.blob == obj.blob)

    def __ne__(self, obj):
        return not self.__eq__(obj)

    def info(self):
        """Return a human readable, multi-line description of the blob."""
        msg = "-" * 60 + "\n"
        msg += "SecKeyBlob\n"
        msg += "-" * 60 + "\n"
        msg += "Mode: {}\n".format(self.mode)
        msg += "Algorithm: {}\n".format(self.algorithm)
        msg += "Flag: 0x{:02X}\n".format(self.flag)
        msg += "Size: {} Bytes\n".format(len(self._data))
        return msg

    def export(self):
        """Serialize the blob: 4 header bytes followed by the raw key data."""
        raw_data = pack("4B", self.mode, self.algorithm, self.size, self.flag)
        raw_data += bytes(self._data)
        return raw_data

    @classmethod
    def parse(cls, data, offset=0):
        """Deserialize a SecretKeyBlob from *data* starting at *offset*."""
        (mode, alg, size, flg) = unpack_from("4B", data, offset)
        offset += 4
        obj = cls(mode, alg, flg)
        # BUGFIX: `size` includes the 4 header bytes (see `size` property);
        # the old code sliced `size` bytes here and therefore swallowed up
        # to 4 bytes of whatever followed the blob in the stream.
        obj.blob = data[offset: offset + size - 4]
        return obj
class Certificate(object):
    """HAB certificate segment: a CRT-tagged header plus opaque certificate data."""

    @property
    def version(self):
        # Format version byte, stored in the header's param field.
        return self._header.param

    @property
    def size(self):
        return Header.SIZE + len(self._data)

    def __init__(self, version=0x40, data=None):
        """Create a certificate segment with the given version and raw data."""
        self._header = Header(tag=SegTag.CRT, param=version)
        self._data = bytearray() if data is None else bytearray(data)

    def __repr__(self):
        return "Certificate <Ver: {:X}.{:X}, Size: {}>".format(self.version >> 4, self.version & 0xF, len(self._data))

    def __eq__(self, obj):
        # BUGFIX: the old element-wise loop reported equality for a longer
        # peer with a matching prefix and raised IndexError for a shorter one.
        return (isinstance(obj, Certificate)
                and self.version == obj.version
                and self._data == obj._data)

    def __ne__(self, obj):
        return not self.__eq__(obj)

    def __len__(self):
        return len(self._data)

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        self._data[key] = value

    def __iter__(self):
        return self._data.__iter__()

    def info(self):
        msg = "-" * 60 + "\n"
        msg += "Certificate (Ver: {:X}.{:X}, Size: {})\n".format(self.version >> 4, self.version & 0xF, len(self._data))
        msg += "-" * 60 + "\n"
        return msg

    def export(self):
        """Serialize the segment (the header length is fixed up first)."""
        self._header.length = self.size
        raw_data = self._header.export()
        raw_data += self._data
        return raw_data

    @classmethod
    def parse(cls, data, offset=0):
        """Deserialize a Certificate segment from *data* at *offset*."""
        header = Header.parse(data, offset, SegTag.CRT)
        offset += Header.SIZE
        return cls(header.param, data[offset: offset + header.length - Header.SIZE])
class Signature(object):
    """HAB signature segment: a SIG-tagged header plus opaque signature data."""

    @property
    def version(self):
        # Format version byte, stored in the header's param field.
        return self._header.param

    @property
    def size(self):
        return Header.SIZE + len(self._data)

    def __init__(self, version=0x40, data=None):
        """Create a signature segment with the given version and raw data."""
        self._header = Header(tag=SegTag.SIG, param=version)
        self._data = bytearray() if data is None else bytearray(data)

    def __repr__(self):
        return "Signature <Ver: {:X}.{:X}, Size: {}>".format(self.version >> 4, self.version & 0xF, len(self._data))

    def __eq__(self, obj):
        # BUGFIX: the old element-wise loop reported equality for a longer
        # peer with a matching prefix and raised IndexError for a shorter one.
        return (isinstance(obj, Signature)
                and self.version == obj.version
                and self._data == obj._data)

    def __ne__(self, obj):
        return not self.__eq__(obj)

    def __len__(self):
        return len(self._data)

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        self._data[key] = value

    def __iter__(self):
        return self._data.__iter__()

    def info(self):
        msg = "-" * 60 + "\n"
        msg += "Signature (Ver: {:X}.{:X}, Size: {})\n".format(self.version >> 4, self.version & 0xF, len(self._data))
        msg += "-" * 60 + "\n"
        return msg

    def export(self):
        """Serialize the segment (the header length is fixed up first)."""
        self._header.length = self.size
        raw_data = self._header.export()
        raw_data += self._data
        return raw_data

    @classmethod
    def parse(cls, data, offset=0):
        """Deserialize a Signature segment from *data* at *offset*."""
        header = Header.parse(data, offset, SegTag.SIG)
        offset += Header.SIZE
        return cls(header.param, data[offset: offset + header.length - Header.SIZE])
class MAC(object):
    """HAB MAC segment: header, nonce/MAC byte counts and the nonce+MAC payload."""

    @property
    def version(self):
        # Format version byte, stored in the header's param field.
        return self._header.param

    @property
    def size(self):
        # Header + 4 fixed bytes (reserved, nonce len, reserved, MAC len) + payload.
        return Header.SIZE + 4 + len(self._data)

    def __init__(self, version=0x40, nonce_bytes=0, mac_bytes=0, data=None):
        """Create a MAC segment with the given version, length fields and payload."""
        self._header = Header(tag=SegTag.MAC, param=version)
        self.nonce_bytes = nonce_bytes
        self.mac_bytes = mac_bytes
        self._data = bytearray() if data is None else bytearray(data)

    def __repr__(self):
        return "MAC <Ver: {:X}.{:X}, Nonce: {}, MAC: {}>".format(
            self.version >> 4, self.version & 0xF, self.nonce_bytes, self.mac_bytes)

    def __eq__(self, obj):
        # BUGFIX: compare payloads by value; the old element loop accepted a
        # longer peer with a matching prefix and crashed on a shorter one.
        return (isinstance(obj, MAC)
                and self.version == obj.version
                and self.nonce_bytes == obj.nonce_bytes
                and self.mac_bytes == obj.mac_bytes
                and self._data == obj._data)

    def __ne__(self, obj):
        return not self.__eq__(obj)

    def __len__(self):
        return len(self._data)

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        self._data[key] = value

    def __iter__(self):
        return self._data.__iter__()

    def info(self):
        msg = "-" * 60 + "\n"
        msg += "MAC (Version: {:X}.{:X})\n".format(self.version >> 4, self.version & 0xF)
        msg += "-" * 60 + "\n"
        msg += "Nonce Len: {} Bytes\n".format(self.nonce_bytes)
        msg += "MAC Len: {} Bytes\n".format(self.mac_bytes)
        msg += "[{}]\n".format(self._data)
        return msg

    def export(self):
        """Serialize the segment: header | 0 | nonce len | 0 | MAC len | payload."""
        self._header.length = self.size
        raw_data = self._header.export()
        raw_data += pack(">4B", 0, self.nonce_bytes, 0, self.mac_bytes)
        raw_data += bytes(self._data)
        return raw_data

    @classmethod
    def parse(cls, data, offset=0):
        """Deserialize a MAC segment from *data* at *offset*."""
        header = Header.parse(data, offset, SegTag.MAC)
        # BUGFIX: the length fields live immediately after the header; the
        # old code unpacked them from `offset` itself, i.e. from the header
        # bytes, which does not match what export() writes.
        (_, nonce_bytes, _, mac_bytes) = unpack_from(">4B", data, offset + Header.SIZE)
        offset += Header.SIZE + 4
        return cls(header.param, nonce_bytes, mac_bytes, data[offset: offset + header.length - (Header.SIZE + 4)])
class SrkItem(object):
    """A single Super Root Key: an RSA public key (modulus + exponent) in SRK-table format."""
    SRK_TAG = 0xE1
    @property
    def algorithm(self):
        # Signature algorithm (EnumAlgorithm value), stored in the header's param field.
        return self._header.param
    @property
    def flag(self):
        return self._flag
    @flag.setter
    def flag(self, value):
        # 0x80 marks the key as a CA key; 0 is a plain key.
        assert value in (0, 0x80)
        self._flag = value
    @property
    def key_length(self):
        # RSA key size in bits, derived from the modulus byte length.
        return len(self.modulus) * 8
    @property
    def size(self):
        # Header + 8 fixed bytes (3 reserved, flag, 2x2-byte lengths) + key material.
        return Header.SIZE + 8 + len(self.modulus) + len(self.exponent)
    def __init__(self, modulus, exponent, flag=0, algorithm=EnumAlgorithm.PKCS1):
        """Create an SRK entry from raw big-endian *modulus* and *exponent* bytes."""
        assert isinstance(modulus, bytes)
        assert isinstance(exponent, bytes)
        self._header = Header(tag=self.SRK_TAG, param=algorithm)
        self.flag = flag
        self.modulus = modulus
        self.exponent = exponent
    def __repr__(self):
        return "SRK <Algorithm: {}, CA: {}>".format(EnumAlgorithm[self.algorithm], 'YES' if self.flag == 0x80 else 'NO')
    def __eq__(self, obj):
        if not isinstance(obj, SrkItem):
            return False
        if self.algorithm != obj.algorithm or \
                self.flag != obj.flag or \
                self.key_length != obj.key_length or \
                self.modulus != obj.modulus or \
                self.exponent != obj.exponent:
            return False
        return True
    def __ne__(self, obj):
        return not self.__eq__(obj)
    def info(self):
        """Return a human readable description of the key."""
        msg = str()
        msg += "Algorithm: {}\n".format(EnumAlgorithm[self.algorithm])
        msg += "Flag: 0x{:02X} {}\n".format(self.flag, '(CA)' if self.flag == 0x80 else '')
        msg += "Length: {} bit\n".format(self.key_length)
        msg += "Modulus:\n"
        msg += modulus_fmt(self.modulus)
        msg += "\n"
        msg += "Exponent: {0} (0x{0:X})\n".format(int.from_bytes(self.exponent, 'big'))
        return msg
    def export(self):
        # Layout: header | 3 reserved zero bytes | flag | modulus len | exponent len | modulus | exponent
        self._header.length = self.size
        data = self._header.export()
        data += pack(">4B2H", 0, 0, 0, self.flag, len(self.modulus), len(self.exponent))
        data += bytes(self.modulus)
        data += bytes(self.exponent)
        return data
    @classmethod
    def parse(cls, data, offset=0):
        """ Parse segment from bytes array
        :param data: The bytes array of SRK segment
        :param offset: The offset of input data
        :return SrkItem object
        """
        header = Header.parse(data, offset, cls.SRK_TAG)
        # Skip the header and the three reserved bytes that precede the flag.
        offset += Header.SIZE + 3
        (flag, modulus_len, exponent_len) = unpack_from(">B2H", data, offset)
        offset += 5
        modulus = data[offset: offset + modulus_len]
        offset += modulus_len
        exponent = data[offset: offset + exponent_len]
        return cls(modulus, exponent, flag, header.param)
    @classmethod
    def from_certificate(cls, cert):
        """Build an SrkItem from a `cryptography` x509.Certificate public key.

        The CA flag is set when the certificate's keyUsage extension has
        key_cert_sign enabled.
        """
        from cryptography import x509
        assert isinstance(cert, x509.Certificate)
        flag = 0
        # NOTE(review): `oid._name` is a private attribute of the cryptography
        # package; matching on isinstance(extension.value, x509.KeyUsage)
        # would be the public API - confirm before changing.
        for extension in cert.extensions:
            if extension.oid._name == 'keyUsage':
                if extension.value.key_cert_sign:
                    flag = 0x80
        # get modulus and exponent of public key
        pub_key_numbers = cert.public_key().public_numbers()
        modulus_len = pub_key_numbers.n.bit_length() // 8
        if pub_key_numbers.n.bit_length() % 8:
            modulus_len += 1
        exponent_len = pub_key_numbers.e.bit_length() // 8
        if pub_key_numbers.e.bit_length() % 8:
            exponent_len += 1
        modulus = pub_key_numbers.n.to_bytes(modulus_len, "big")
        exponent = pub_key_numbers.e.to_bytes(exponent_len, "big")
        return cls(modulus, exponent, flag)
class SrkTable(object):
    """Table of Super Root Keys (SrkItem entries) wrapped in a CRT-tagged header."""

    @property
    def version(self):
        # Format version byte, stored in the header's param field.
        return self._header.param

    @property
    def size(self):
        size = Header.SIZE
        for key in self._keys:
            size += key.size
        return size

    def __init__(self, version=0x40):
        """Create an empty SRK table with the given format version."""
        self._header = Header(tag=SegTag.CRT, param=version)
        self._keys = []

    def __repr__(self):
        return "SRK_Table <Version: {:X}.{:X}, Keys: {}>".format(self.version >> 4, self.version & 0xF, len(self._keys))

    def __eq__(self, obj):
        # BUGFIX: the old check only verified that obj's keys were a subset
        # of ours, so a table with extra keys compared equal to its subset.
        # Key order matters (the SRK index selects a key), so compare lists.
        return (isinstance(obj, SrkTable)
                and self.version == obj.version
                and self._keys == obj._keys)

    def __ne__(self, obj):
        return not self.__eq__(obj)

    def __len__(self):
        return len(self._keys)

    def __getitem__(self, key):
        return self._keys[key]

    def __setitem__(self, key, value):
        assert isinstance(value, SrkItem)
        self._keys[key] = value

    def __iter__(self):
        return self._keys.__iter__()

    def info(self):
        msg = "-" * 60 + "\n"
        msg += "SRK Table (Version: {:X}.{:X}, Keys: {})\n".format(self.version>>4, self.version&0xF, len(self._keys))
        msg += "-" * 60 + "\n"
        for i, srk in enumerate(self._keys):
            msg += "Key Index: {} \n".format(i)
            msg += srk.info()
            msg += "\n"
        return msg

    def append(self, srk):
        """Append an SrkItem to the table."""
        self._keys.append(srk)

    def export_fuses(self):
        """Return the SRK fuse value: SHA-256 over the per-key SHA-256 digests."""
        data = b''
        for srk in self._keys:
            data += sha256(srk.export()).digest()
        return sha256(data).digest()

    def export(self):
        """Serialize the table (the header length is fixed up first)."""
        self._header.length = self.size
        raw_data = self._header.export()
        for srk in self._keys:
            raw_data += srk.export()
        return raw_data

    @classmethod
    def parse(cls, data, offset=0):
        """Deserialize an SRK table, consuming items until the header length is exhausted."""
        header = Header.parse(data, offset, SegTag.CRT)
        offset += Header.SIZE
        obj = cls(header.param)
        length = header.length - Header.SIZE
        while length > 0:
            srk = SrkItem.parse(data, offset)
            offset += srk.size
            length -= srk.size
            obj.append(srk)
        return obj
| StarcoderdataPython |
3232709 | from enums.configuration import Configuration
from enums.word_evaluation_type import WordEvaluationType
from enums.overlap_type import OverlapType
from services.experiments.process.neighbourhood_similarity_process_service import NeighbourhoodSimilarityProcessService
from enums.font_weight import FontWeight
from entities.plot.label_options import LabelOptions
from entities.plot.figure_options import FigureOptions
from entities.plot.plot_options import PlotOptions
from entities.cache.cache_options import CacheOptions
from services.cache_service import CacheService
from scipy import sparse
from scipy.sparse import vstack
from tqdm import tqdm
from entities.word_neighbourhood_stats import WordNeighbourhoodStats
from services.log_service import LogService
from entities.plot.legend_options import LegendOptions
from typing import Dict, List, Tuple
from matplotlib.pyplot import plot
import math
import numpy as np
from entities.word_evaluation import WordEvaluation
from services.arguments.ocr_evaluation_arguments_service import OCREvaluationArgumentsService
from services.file_service import FileService
from services.metrics_service import MetricsService
from services.plot_service import PlotService
from services.fit_transformation_service import FitTransformationService
from services.process.evaluation_process_service import EvaluationProcessService
class WordNeighbourhoodService:
    """Computes, caches and plots nearest-neighbour statistics of word embeddings
    across several embedding models (raw OCR, ground truth, baseline, original)."""
    def __init__(
            self,
            arguments_service: OCREvaluationArgumentsService,
            metrics_service: MetricsService,
            plot_service: PlotService,
            file_service: FileService,
            log_service: LogService,
            fit_transformation_service: FitTransformationService,
            cache_service: CacheService,
            neighbourhood_similarity_process_service: NeighbourhoodSimilarityProcessService,
            process_service: EvaluationProcessService):
        self._arguments_service = arguments_service
        self._metrics_service = metrics_service
        self._plot_service = plot_service
        self._file_service = file_service
        self._log_service = log_service
        self._fit_transformation_service = fit_transformation_service
        self._cache_service = cache_service
        self._neighbourhood_similarity_process_service = neighbourhood_similarity_process_service
        # load previously cached word similarity calculations
        common_tokens = process_service.get_common_words()
        self._word_similarity_indices, self._cache_needs = self._load_cached_calculations(
            common_tokens)
    def plot_word_neighbourhoods(
            self,
            target_word_evaluation: WordEvaluation,
            word_neighbourhood_stats: WordNeighbourhoodStats):
        """Fit all embeddings of the target word and its neighbourhoods into a
        common low-dimensional space and save a scatter plot of them."""
        self._log_service.log_debug(
            f'Plotting neighbourhoods for word \'{target_word_evaluation.word}\'')
        all_words = word_neighbourhood_stats.get_all_words()
        all_word_embeddings = []
        for i in range(word_neighbourhood_stats.neighbourhoods_amount):
            all_word_embeddings.append(
                target_word_evaluation.get_embeddings(i))
        all_word_embeddings.extend(
            word_neighbourhood_stats.get_all_embeddings())
        assert all(not np.isnan(x).any()
                   for x in all_word_embeddings), "Invalid values found in word embeddings"
        fitted_result = self._fit_transformation_service.fit_and_transform_vectors(
            number_of_components=word_neighbourhood_stats.neighbourhoods_amount,
            vectors=all_word_embeddings)
        # The first `neighbourhoods_amount` fitted vectors belong to the target
        # word itself (one per embedding model); the rest are the neighbours.
        self._plot_fitted_result(
            fitted_result[:word_neighbourhood_stats.neighbourhoods_amount],
            fitted_result[word_neighbourhood_stats.neighbourhoods_amount:],
            target_word_evaluation,
            all_words,
            word_neighbourhood_stats)
    def get_word_neighbourhoods(
            self,
            word_evaluation: WordEvaluation,
            vocabulary_evaluations: List[WordEvaluation],
            neighbourhood_set_sizes: List[int],
            overlap_type: OverlapType,
            include_embeddings: bool = False) -> Dict[int, WordNeighbourhoodStats]:
        """Return neighbourhood stats per requested set size, comparing the two
        embedding models selected by *overlap_type*."""
        self._log_service.log_debug(
            f'Extracting neighbourhoods for word \'{word_evaluation.word}\'')
        result = {
            neighbourhood_set_size: WordNeighbourhoodStats(
                word_evaluation.word, neighbourhoods=[])
            for neighbourhood_set_size in neighbourhood_set_sizes
        }
        # Map the overlap type onto the pair of embedding indices to compare
        # (0: OCR, 1: ground truth, 2: baseline, 3: original - per OverlapType usage).
        model_indices = []
        if overlap_type == OverlapType.BASEvsGT:
            model_indices = [2, 1]
        elif overlap_type == OverlapType.BASEvsOCR:
            model_indices = [2, 0]
        elif overlap_type == OverlapType.BASEvsOG:
            model_indices = [2, 3]
        elif overlap_type == OverlapType.GTvsOCR:
            model_indices = [1, 0]
        for i in model_indices:
            word_neighbourhoods_per_set_size = self._get_word_neighbourhood(
                word_evaluation,
                vocabulary_evaluations,
                embeddings_idx=i,
                neighbourhood_set_sizes=neighbourhood_set_sizes,
                output_full_evaluations=include_embeddings)
            for neighbourhood_set_size, word_neighbourhood in word_neighbourhoods_per_set_size.items():
                result[neighbourhood_set_size].add_neighbourhood(word_neighbourhood)
        return result
    def _plot_fitted_result(
            self,
            target_word_fitted_vectors: np.ndarray,
            fitted_vectors: np.ndarray,
            target_word_evaluation: WordEvaluation,
            all_words: List[str],
            word_neighbourhoods: WordNeighbourhoodStats):
        """Draw one scatter + label group per neighbourhood and save the figure."""
        ax = self._plot_service.create_plot()
        labels_colors = ['crimson', 'royalblue', 'darkgreen']
        word_neighbourhood_length = word_neighbourhoods.neighbourhood_size
        plot_options = PlotOptions(
            ax=ax,
            legend_options=LegendOptions(show_legend=False),
            figure_options=FigureOptions(
                show_plot=False))
        for i in range(word_neighbourhoods.neighbourhoods_amount):
            target_word_fitted_vector = target_word_fitted_vectors[i]
            current_fitted_vectors = fitted_vectors[(
                i * word_neighbourhood_length):(i+1)*word_neighbourhood_length]
            # Neighbour coordinates are plotted relative to the target word's position.
            x_coords = target_word_fitted_vector[0] + \
                current_fitted_vectors[:, 0]
            y_coords = target_word_fitted_vector[1] + \
                current_fitted_vectors[:, 1]
            self._plot_service.plot_scatter(
                x_coords,
                y_coords,
                plot_options=plot_options)
            current_words = [target_word_evaluation.word] + all_words[(
                i*word_neighbourhood_length):((i+1)*word_neighbourhood_length)]
            # Words appearing in more than one neighbourhood get the 'overlapping' colour.
            current_word_colors = [labels_colors[i]] + [labels_colors[i] if all_words.count(
                x) == 1 else labels_colors[-1] for x in current_words[1:]]
            labels_options = [
                LabelOptions(
                    x=x_coords[k],
                    y=y_coords[k],
                    text=current_words[k],
                    text_color=current_word_colors[k])
                for k in range(word_neighbourhood_length)]
            # Highlight the target word itself.
            labels_options[0]._font_weight = FontWeight.Bold
            labels_options[0]._font_size = 15
            self._plot_service.plot_labels(labels_options, plot_options)
        self._plot_service.set_plot_properties(
            ax=ax,
            figure_options=FigureOptions(
                title=f'Neighbourhoods `{target_word_evaluation.word}`',
                hide_axis=True),
            legend_options=LegendOptions(
                show_legend=True,
                legend_colors=labels_colors,
                legend_labels=['raw', 'ground truth', 'overlapping']))
        experiments_folder = self._file_service.get_experiments_path()
        neighbourhoods_folder = self._file_service.combine_path(
            experiments_folder,
            'neighbourhoods',
            self._arguments_service.get_configuration_name(),
            create_if_missing=True)
        self._plot_service.save_plot(
            save_path=neighbourhoods_folder,
            filename=f'{target_word_evaluation}-neighborhood-change')
    def _token_already_calculated(self, token: str, embeddings_idx: int) -> bool:
        """Return True when a cached similarity ranking exists for (token, model)."""
        if (token not in self._word_similarity_indices.keys() or
                embeddings_idx not in self._word_similarity_indices[token].keys() or
                self._word_similarity_indices[token][embeddings_idx] is None):
            return False
        return True
    def _get_word_neighbourhood(
            self,
            word_evaluation: WordEvaluation,
            model_evaluations: List[WordEvaluation],
            embeddings_idx: int,
            neighbourhood_set_sizes: List[int],
            output_full_evaluations: bool = False) -> Dict[int, List[WordEvaluation]]:
        """Rank *model_evaluations* by cosine similarity to the target word and
        return, per set size, either the top indices or (when
        *output_full_evaluations*) the corresponding WordEvaluation objects."""
        # We check if we have already calculated this word neighbourhood for the selected embeddings id
        if (not output_full_evaluations and self._token_already_calculated(word_evaluation.word, embeddings_idx)):
            indices = self._word_similarity_indices[word_evaluation.word][embeddings_idx]
        else:
            # If no calculation is available, we calculate and cache
            target_embeddings = np.array(
                [word_evaluation.get_embeddings(embeddings_idx)])
            model_embeddings = np.array([model_evaluation.get_embeddings(
                embeddings_idx) for model_evaluation in model_evaluations])
            distances = self._metrics_service.calculate_cosine_similarities(
                target_embeddings, model_embeddings)
            # Highest similarity first.
            indices = np.argsort(distances.squeeze())[::-1]
            if not output_full_evaluations:
                self._cache_needs[WordEvaluationType(embeddings_idx)] = True
                if word_evaluation.word not in self._word_similarity_indices.keys():
                    self._word_similarity_indices[word_evaluation.word] = {}
                # We mark the indices to be cached because we add a new entry
                self._word_similarity_indices[word_evaluation.word][embeddings_idx] = indices
        result = {}
        for neighbourhood_set_size in neighbourhood_set_sizes:
            if neighbourhood_set_size > len(indices):
                self._log_service.log_error(
                    f'Neighbourhood set size ({neighbourhood_set_size}) is larger than the collection ({len(indices)}). Using the entire collection instead')
                raise Exception('Invalid set size')
            max_indices = indices[:neighbourhood_set_size]
            if output_full_evaluations:
                result_evaluations = [x for i, x in enumerate(
                    model_evaluations) if i in max_indices]
                result[neighbourhood_set_size] = result_evaluations
                continue
            result[neighbourhood_set_size] = max_indices
        return result
    def generate_neighbourhood_plots(
            self,
            word_evaluations,
            cosine_distances: Dict[str, float]):
        """Plot the GT-vs-OCR neighbourhoods (size 50) of each target token."""
        target_tokens = self._neighbourhood_similarity_process_service.get_target_tokens(
            cosine_distances)
        neighbourhood_set_size = 50
        for target_token in tqdm(target_tokens, desc="Processing target tokens", total=len(target_tokens)):
            # NOTE(review): next() without a default raises StopIteration when the
            # token is absent, so the `i is None` guard below never fires - confirm
            # whether a default of None was intended.
            i = next(i for i, word_evaluation in enumerate(
                word_evaluations) if word_evaluation.word == target_token)
            if i is None:
                continue
            word_evaluation = word_evaluations[i]
            remaining_words = [word_evaluation for idx, word_evaluation in enumerate(
                word_evaluations) if word_evaluation.contains_all_embeddings(OverlapType.GTvsOCR) and idx != i]
            word_neighbourhood_stats = self.get_word_neighbourhoods(
                word_evaluation,
                remaining_words,
                neighbourhood_set_sizes=[neighbourhood_set_size],
                overlap_type=OverlapType.GTvsOCR,
                include_embeddings=True)
            self.plot_word_neighbourhoods(
                word_evaluation,
                word_neighbourhood_stats[neighbourhood_set_size])
    def generate_neighbourhood_similarities(
            self,
            word_evaluations: List[WordEvaluation],
            overlap_type: OverlapType) -> Dict[str, int]:
        """For every word supporting *overlap_type*, count neighbourhood overlaps
        at 1..100 percent of the common vocabulary size; returns
        {percentage: {word: overlaps}}."""
        self._log_service.log_debug(
            f'Generating neighbourhood similarity results for overlap type \'{overlap_type.value}\'')
        # get all indices of words that support the current overlap type
        common_words_indices = [
            i
            for i, word_evaluation in enumerate(word_evaluations)
            if word_evaluation.contains_all_embeddings(overlap_type)]
        percentages = list(range(1, 101, 1))  # 1..100 percent of the common vocabulary
        words_amounts = [
            int(len(common_words_indices) * (float(percentage)/ 100)) - (1 if percentage == 100 else 0)
            for percentage in percentages]
        result = {
            percentage: {}
            for percentage in percentages
        }
        self._log_service.log_summary(
            f'Total \'{overlap_type.value}\' neighbourhood overlaps', len(common_words_indices))
        for i in tqdm(iterable=common_words_indices, desc=f'Calculating neighbourhood overlaps [\'{overlap_type.value}\']', total=len(common_words_indices)):
            # get the target word evaluation
            word_evaluation = word_evaluations[i]
            # get the remaining valid word evaluations
            remaining_words = [word_evaluations[idx]
                               for idx in common_words_indices if idx != i]
            # calculate the word neighbourhood stats for this word
            word_neighbourhood_stats_per_set_size = self.get_word_neighbourhoods(
                word_evaluation,
                remaining_words,
                neighbourhood_set_sizes=words_amounts,
                overlap_type=overlap_type)
            # we only need the overlaps amount
            for words_amount, percentage in zip(words_amounts, percentages):
                result[percentage][word_evaluation.word] = word_neighbourhood_stats_per_set_size[words_amount].overlaps_amount
            # occasionally cache the calculations performed so far in case the process is interrupted
            if i % 500 == 0:
                self._log_service.log_summary(
                    f'Processed \'{overlap_type.value}\' neighbourhood overlaps', i)
                self._save_calculations()
        self._save_calculations()
        return result
    def _load_cached_calculations(self, common_tokens: List[str]) -> Tuple[Dict[str, Dict[int, list]], Dict[WordEvaluationType, bool]]:
        """Load cached similarity rankings per evaluation type.

        Returns (rankings keyed by token then embedding index, flags marking
        which evaluation types still need to be cached)."""
        result = {token: {} for token in common_tokens}
        cache_needs = {}
        for i, word_evaluation_type in enumerate(WordEvaluationType):
            cache_needs[word_evaluation_type] = False
            word_similarities_cache_options = self._create_cache_options(
                word_evaluation_type)
            current_word_similarity_indices: Dict[str, Dict[int, list]] = self._cache_service.get_item_from_cache(
                word_similarities_cache_options)
            if current_word_similarity_indices is None:
                cache_needs[word_evaluation_type] = True
                continue
            for token, value in current_word_similarity_indices.items():
                if token not in result.keys():
                    result[token] = {}
                result[token][i] = value
        return result, cache_needs
    def _save_calculations(self):
        """Persist the similarity rankings for every evaluation type flagged dirty."""
        for i, word_evaluation_type in enumerate(WordEvaluationType):
            if not self._cache_needs[word_evaluation_type]:
                continue
            cache_options = self._create_cache_options(word_evaluation_type)
            current_value = {token: embeddings[i] if i in embeddings.keys(
            ) else None for token, embeddings in self._word_similarity_indices.items()}
            self._cache_service.cache_item(
                current_value,
                cache_options)
            self._cache_needs[word_evaluation_type] = False
    def _create_cache_options(self, word_evaluation_type: WordEvaluationType):
        """Build the CacheOptions key for one evaluation type's similarity cache."""
        random_suffix = ''
        if word_evaluation_type == WordEvaluationType.Baseline or self._arguments_service.initialize_randomly:
            random_suffix = '-rnd'
        configuration_value = None
        if word_evaluation_type == WordEvaluationType.Baseline:
            configuration_value = Configuration.SkipGram
        word_eval_type_suffix = ''
        if word_evaluation_type != WordEvaluationType.Baseline:
            word_eval_type_suffix = f'-{str(word_evaluation_type.value)}'
            if word_evaluation_type != WordEvaluationType.CurrentOriginal:
                word_eval_type_suffix = f'{word_eval_type_suffix}-lr{self._arguments_service.learning_rate}'
        result = CacheOptions(
            'word-similarities',
            seed_specific=True,
            key_suffixes=[
                word_eval_type_suffix,
                '-sep' if self._arguments_service.separate_neighbourhood_vocabularies else '',
                random_suffix,
                '-min',
                str(self._arguments_service.minimal_occurrence_limit)
            ],
            configuration=configuration_value)
        return result
| StarcoderdataPython |
1753048 | <reponame>tehkillerbee/mmdetection-to-tensorrt
from .bbox_head import BBoxHeadWraper
from .double_bbox_head import DoubleConvFCBBoxHeadWraper
from .sabl_head import SABLHeadWraper
# Public API of this package: the bbox-head wrapper classes re-exported above.
__all__ = ['BBoxHeadWraper', 'DoubleConvFCBBoxHeadWraper', 'SABLHeadWraper']
| StarcoderdataPython |
74489 | <gh_stars>0
import calendar
import threading
import keras
from .webap_login import WebAp
from time import ctime, sleep
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import schedule
from random import randint
from datetime import datetime, timedelta, time
import sys
class AutoPunching(WebAp):
    """Automates clock-in/clock-out on the NKUST webap attendance system.

    Inherits browser/login handling from WebAp; assumes `self.account`,
    `self.browser` and `self.wait` are provided by the base class.
    """

    def nav_to_checkform(self):
        """Log in and navigate the frames/menus to the punch-card form."""
        user_menu = self.login(*self.account)
        self.browser.switch_to_default_content()
        self.browser.switch_to_frame(user_menu)
        labor_section = self.wait.until(
            EC.presence_of_element_located((By.ID, "spnode_[B40]_兼任助理差勤作業")))
        labor_section.click()
        self.browser.switch_to_default_content()
        self.browser.switch_to_frame(self.wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "frame[name='MAIN']"))))
        labor_section = self.wait.until(
            EC.presence_of_element_located(
                (By.CSS_SELECTOR, "#SubMenu_dgData > tbody > tr.TRAlternatingItemStyle > td:nth-child(1) > a")))
        labor_section.click()

    def check_in(self):
        """Press the check-in button, report the result alert and log out."""
        check_in_bt = self.wait.until(
            EC.presence_of_element_located((By.ID, "B4001A_btnIN")))
        check_in_bt.click()
        self.wait.until(EC.alert_is_present())
        alert = self.browser.switch_to.alert
        if alert.text == "打上班卡完成":
            print("上班打卡成功")
        elif alert.text == "不允許重複打卡!!!":
            print("失敗,重複打卡")
        # BUGFIX: the old call passed ctime() as a second print() argument
        # instead of formatting it into the "%s" placeholder.
        print("Checking in done at %s" % ctime())
        alert.accept()
        self.logout()

    def check_out(self, work_content="寫程式"):
        """Fill in the work description, press check-out, report and log out."""
        work_content_area = self.wait.until(
            EC.presence_of_element_located((By.ID, "B4001A_txtJOB_NOTES")))
        work_content_area.send_keys(work_content)
        check_out_bt = self.wait.until(
            EC.presence_of_element_located((By.ID, "B4001A_btnOFF")))
        check_out_bt.click()
        self.wait.until(EC.alert_is_present())
        alert = self.browser.switch_to.alert
        if alert.text == "打下班卡完成":
            print("下班打卡成功")
        elif alert.text == "不允許重複打卡!!!":
            print("失敗,重複打卡")
        print("Checking out done at %s" % ctime())
        alert.accept()
        self.logout()

    _WEEKDAYS = ("monday", "tuesday", "wednesday", "thursday",
                 "friday", "saturday", "sunday")

    def plan_job(self, **kwarg):
        """Schedule weekly login/check-in/check-out jobs.

        Keyword arguments map weekday names to ["HH:MM", "HH:MM"]
        (check-in, check-out) pairs; any non-weekday keys (e.g. leftover
        "id"/"password" entries from older callers) are ignored.
        """
        login_delta = timedelta(minutes=5)
        # BUGFIX: the old loop iterated over ALL kwargs (crashing on
        # credential entries) and never parsed check_out_time, so adding a
        # timedelta to the raw "HH:MM" string raised TypeError. It also
        # passed extra arguments to the zero-argument nav_to_checkform.
        for weekday in self._WEEKDAYS:
            if weekday not in kwarg:
                continue
            check_in_time, check_out_time = kwarg[weekday]
            # Login to webap system and navigate to checking page shortly before check-in.
            check_in_time = datetime.strptime(check_in_time, "%H:%M")
            login_time = (check_in_time - login_delta).strftime("%H:%M")
            job = schedule.every().weeks
            job.start_day = weekday
            job.at(login_time).do(self.nav_to_checkform)
            # Check-in, randomly up to 5 minutes early so stamps vary.
            minute_delta = timedelta(minutes=randint(0, 5))
            check_in_time = (check_in_time - minute_delta).strftime("%H:%M")
            job = schedule.every().weeks
            job.start_day = weekday
            job.at(check_in_time).do(self.check_in)
            # Check-out, randomly up to 5 minutes late.
            check_out_time = datetime.strptime(check_out_time, "%H:%M")
            minute_delta = timedelta(minutes=randint(0, 5))
            check_out_time = (check_out_time + minute_delta).strftime("%H:%M")
            job = schedule.every().weeks
            job.start_day = weekday
            job.at(check_out_time).do(self.check_out)
        print("已排程事項:")
        for job in schedule.jobs:
            print(job)

    def fill_break_time(self, start_time="12:15", end_time="12:45"):
        """Type the lunch-break start/end times into the punch-card form."""
        break_time_start = self.wait.until(
            EC.presence_of_element_located((By.ID, "B4001A_txtBREAK_STM")))
        break_time_start.send_keys(start_time)
        break_time_end = self.wait.until(
            EC.presence_of_element_located((By.ID, "B4001A_txtBREAK_ETM")))
        break_time_end.send_keys(end_time)

    def calendar_plan_month_job(self, year, month, start_day, end_day, duty_datetime, holiday_list=(),
                                login_delay=2):
        """Schedule one-shot timers for every duty day of a month.

        duty_datetime maps lowercase weekday names to ["HH:MM", "HH:MM"]
        (check-in, check-out). Days in *holiday_list* and days outside
        [start_day, end_day] are skipped; days already past are skipped too.
        A lunch break is filled in automatically for shifts over 4.5 hours.
        """
        c = calendar.Calendar(firstweekday=6)
        login_delta = timedelta(minutes=login_delay)
        today = datetime.today().day
        if today > start_day:
            start_day = today
        print("排程開始日為%s月%s號" % (month, start_day))
        print("國定假日為%s" % holiday_list)
        for date in c.itermonthdates(year, month):
            weekday = date.strftime("%A").lower()
            if date.month == month and date.day not in holiday_list \
                    and date.day in range(start_day, end_day + 1) \
                    and weekday in duty_datetime.keys():
                # Calculate check-in time (up to 5 minutes early).
                check_in_time = duty_datetime[weekday][0]
                check_in_time = datetime.strptime(check_in_time, "%H:%M").time()
                check_in_time = datetime.combine(date, check_in_time)
                check_in_time = check_in_time - timedelta(minutes=randint(0, 5))
                print(date.ctime(), date.weekday())
                # Login to webap system and navigate to checking page
                login_time = check_in_time - login_delta
                if (login_time - datetime.now()).days >= 0:
                    print("login_time=%s " % login_time.ctime())
                    print("check_in_time=%s " % check_in_time.ctime())
                    print("second to wait: %s" % (
                        login_time - datetime.now()).total_seconds())
                    threading.Timer((login_time - datetime.now()).total_seconds(), self.nav_to_checkform).start()
                    # Check in
                    threading.Timer((check_in_time - datetime.now()).total_seconds(), self.check_in).start()
                    print()
                else:
                    print("今天上班卡時間已過,繼續排程下班卡")
                # Check out (up to 5 minutes late).
                check_out_time = duty_datetime[weekday][1]
                check_out_time = datetime.strptime(check_out_time, "%H:%M").time()
                check_out_time = datetime.combine(date, check_out_time)
                check_out_time = check_out_time + timedelta(minutes=randint(0, 5))
                login_time = check_out_time - login_delta
                if (login_time - datetime.now()).days >= 0:
                    threading.Timer((login_time - datetime.now()).total_seconds(), self.nav_to_checkform).start()
                    print("login_time=%s " % login_time.ctime())
                    if check_out_time - check_in_time > timedelta(hours=4.5):
                        print("break_time= 12:15 ~ 12:45")
                        threading.Timer((check_out_time - datetime.now()).total_seconds(), self.fill_break_time).start()
                    threading.Timer((check_out_time - datetime.now()).total_seconds(), self.check_out).start()
                    print("check_out_time=%s " % check_out_time.ctime())
                    print()
                else:
                    print("今天下班卡時間已過,繼續排程下一天")

    def test_job(self, arg="default"):
        """Debug helper: print a message and the current time."""
        print("I'm working. arg=" + arg)
        print(datetime.now())
if __name__ == '__main__':
    # Entry point: positional CLI args are all integers —
    # presumably year/month/start-day/end-day followed by holiday days
    # (passed through as the trailing list); TODO confirm against
    # calendar_plan_month_job's signature.
    app = AutoPunching("cbc106008", "2848444B")
    numeric_args = [int(value) for value in sys.argv[1:]]
    duty_schedule = {
        "monday": ["13:00", "17:00"],
        "wednesday": ["13:00", "17:00"],
        "friday": ["13:00", "17:00"],
    }
    app.calendar_plan_month_job(
        numeric_args[0], numeric_args[1], numeric_args[2], numeric_args[3],
        duty_schedule, numeric_args[4:])
| StarcoderdataPython |
26156 | <reponame>valhallasw/phabricator-tools
"""Start a local webserver to report the status of an arcyd instance."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdcmd_instaweb
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
import BaseHTTPServer
import os
import abdcmd_arcydstatushtml
import abdcmd_repostatushtml
def getFromfilePrefixChars():
    """Return the argparse fromfile prefix characters (none are used)."""
    return None
def setupParser(parser):
    """Register the instaweb command-line options on *parser*.

    Adds an optional --port (default 8000) and the two required paths
    pointing at the arcyd report file and the per-repo status files.
    """
    option_specs = [
        ('--port', dict(
            metavar="PORT",
            type=int,
            default=8000,
            help="port to serve pages on")),
        ('--report-file', dict(
            metavar="REPORTFILE",
            type=str,
            required=True,
            help="path to the arcyd report file to render")),
        ('--repo-file-dir', dict(
            metavar="REPOFILEDIR",
            type=str,
            required=True,
            help="path to the repo files to render")),
    ]
    for flag, keyword_args in option_specs:
        parser.add_argument(flag, **keyword_args)
class _NotFoundError(Exception):
pass
class _RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Serve rendered arcyd status pages over HTTP (Python 2 BaseHTTPServer).

    '/' renders the overall arcyd status page; any other path is treated
    as a repo name and rendered from the repo status files; unknown
    content (including favicon requests) yields a minimal 404 page.
    """

    def __init__(self, instaweb_args, *args):
        # instaweb_args: the parsed command-line namespace
        # (report_file, repo_file_dir) bound in by the factory below.
        self._instaweb_args = instaweb_args
        self.path = None  # for pychecker
        self.wfile = None  # for pychecker
        BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args)

    def do_GET(self):
        # Render the requested page; translate _NotFoundError into a 404
        # response instead of letting it propagate to the server loop.
        try:
            content = self._get_content()
        except _NotFoundError:
            self.send_response(404)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write("<html><body><h1>404</h1></body></html>")
            self.wfile.close()
        else:
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(content)
            self.wfile.close()

    def _get_content(self):
        """Return the HTML for self.path; raise _NotFoundError if absent."""
        args = self._instaweb_args
        if self.path == '/':
            content = abdcmd_arcydstatushtml.render_content(
                args.report_file, '')
        elif self.path.lower().endswith('favicon.ico'):
            raise _NotFoundError('could not find favicon')
        else:
            # Map the URL path onto the per-repo status files; presumably
            # '<repo>.try' holds repo state and '<repo>.ok' branch state —
            # TODO confirm against arcyd's file layout.
            relative_path = self.path.lstrip('/')
            dir_path = os.path.join(args.repo_file_dir, relative_path)
            # XXX: this is fragile, will go away once arcyd folder
            # layout is standardized
            repo_path = dir_path + '.try'
            branches_path = dir_path + '.ok'
            content = abdcmd_repostatushtml.render_content(
                repo_path, branches_path)
        return content
def _request_handler_factory(instaweb_args):
def factory(*args):
return _RequestHandler(instaweb_args, *args)
return factory
def process(args):
    """Run the status webserver forever on the configured port.

    Binds to all interfaces on args.port and serves pages rendered by
    _RequestHandler until the process is terminated.
    """
    bind_address = ('', args.port)
    handler_class = _request_handler_factory(args)
    server = BaseHTTPServer.HTTPServer(bind_address, handler_class)
    server.serve_forever()
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| StarcoderdataPython |
40220 | from datetime import datetime, timedelta
from random import sample, choice, randrange
from unittest import TestCase
import tests.test_timeinterval as ti
from tests.factories import make_sets, make_moments
from timeset import TimeSet
# Fixed one-week window shared by every test in this module.
t0 = datetime(2019, 7, 19)
t6 = datetime(2019, 7, 25)
# 20 random moments inside the window, indexable as t[0] .. t[19].
t = make_moments(20, t0, t6)
sets = make_sets(20, t0, t6, 3)  # Guaranteed not to make empty sets
class TestTimeSet(TestCase):
    """Randomized property tests for TimeSet set algebra.

    Relies on the module-level fixtures: ``t`` (20 random moments) and
    ``sets`` (20 random non-empty TimeSets) over the t0..t6 window.
    """

    def test_from_interval(self):
        """A one-interval set round-trips through from_interval()."""
        s = make_sets(1, t[0], t[19], 1)[0]
        i, = s.intervals  # Unpacking single element
        s1 = TimeSet.from_interval(i.start, i.end)
        self.assertEqual(s1, s)

    def test_empty(self):
        """All three ways of spelling the empty set are empty."""
        self.assertTrue(TimeSet([]).is_empty(), "Set without intervals")
        self.assertTrue(TimeSet.empty().is_empty(), "Empty set")
        self.assertTrue(TimeSet([ti.empty, ti.empty]).is_empty(),
                        "Set of empty intervals")

    def test_not_empty(self):
        self.assertFalse(TimeSet.from_interval(t[2], t[5]).is_empty())

    def test_union(self):
        """Union contains both operands and equals the interval-level union."""
        s0, s1 = sample(sets, k=2)
        u = s0.union(s1)
        self.assertTrue(s0.is_subset(u))
        self.assertTrue(s1.is_subset(u))
        intervals = list(s0.intervals.union(s1.intervals))
        self.assertEqual(u, TimeSet(intervals))

    def test_empty_union(self):
        """The empty set is a neutral element for union."""
        e = TimeSet.empty()
        s = choice(sets)
        self.assertEqual(e.union(s), s)

    def test_intersection(self):
        """An intersection is a subset of both operands."""
        s0, s1 = sample(sets, k=2)
        intersection = s0.intersection(s1)
        self.assertTrue(intersection.is_subset(s0))
        self.assertTrue(intersection.is_subset(s1))

    def test_difference(self):
        """s1 - s0 stays inside s1 and is disjoint from s0."""
        s0, s1 = sample(sets, k=2)
        diff = s1.difference(s0)
        self.assertTrue(diff.is_subset(s1))
        self.assertTrue(s0.intersection(diff).is_empty())

    def test_contains(self):
        """An interval's start point and midpoint are members of the set."""
        s = choice(sets)
        i = next(iter(s.intervals))
        middle = i.start + (i.end - i.start) / 2
        self.assertTrue(s.contains(i.start), "Starting point")
        self.assertTrue(s.contains(middle), "Middle point")

    def test_not_contains(self):
        """An interval's end point is excluded, as is any point outside."""
        s = choice(sets)
        i = next(iter(s.intervals))
        self.assertFalse(s.contains(i.end), "Interval ending point")
        self.assertFalse(s.contains(t6 + timedelta(days=1)), "Point outside")

    def test_is_subset(self):
        s = choice(sets)
        # FIX: random.sample() rejects sets since Python 3.11 (deprecated
        # in 3.9), and s.intervals appears to be a set (it is .union()-ed
        # and iterated unordered elsewhere) — materialize it into a
        # sequence before sampling.
        picked = sample(list(s.intervals), randrange(1, len(s.intervals) + 1))
        self.assertTrue(TimeSet(picked).is_subset(s))

    def test_is_not_subset(self):
        # Redraw until the two sets are incomparable, then the union must
        # be a strict superset of each operand.
        s0, s1 = sample(sets, k=2)
        while s0.is_subset(s1) or s1.is_subset(s0):
            s0, s1 = sample(sets, k=2)
        self.assertFalse(s0.union(s1).is_subset(s0), "Not subset!")
        self.assertFalse(s0.union(s1).is_subset(s1), "Not subset!")

    def test_is_empty(self):
        self.assertTrue(TimeSet([]).is_empty(), "No intervals")
        self.assertTrue(TimeSet([ti.empty]).is_empty(), "Empty interval")

    def test_is_not_empty(self):
        s = choice(sets)
        self.assertFalse(s.is_empty(), "Not empty set!")
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.