id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6578404 | <reponame>Ouranosinc/malleefowl
import pytest
from pywps import Service
from pywps.tests import assert_response_success
from .common import TESTDATA, client_for
from malleefowl.processes.wps_esgsearch import ESGSearchProcess
@pytest.mark.online
def test_dataset():
    """Execute an esgsearch Dataset query and expect a successful WPS response."""
    client = client_for(Service(processes=[ESGSearchProcess()]))
    datainputs = "url={};search_type={};limit={};offset={};constraints={}".format(
        'http://esgf-data.dkrz.de/esg-search',
        'Dataset', '10', '10',
        'project:CORDEX,time_frequency:mon,variable:tas,experiment:historical')
    resp = client.get(
        service='WPS', request='Execute', version='1.0.0',
        identifier='esgsearch',
        datainputs=datainputs)
    # BUG FIX: original used a Python 2 print *statement*, which is a
    # SyntaxError on Python 3 (the rest of the file is py3-style).
    print(resp.get_data())
    assert_response_success(resp)
@pytest.mark.online
def test_dataset_with_spaces():
    """Constraint strings containing stray whitespace should still succeed."""
    wps = client_for(Service(processes=[ESGSearchProcess()]))
    inputs = "url={};search_type={};limit={};offset={};constraints={}".format(
        'http://esgf-data.dkrz.de/esg-search',
        'Dataset', '10', '10',
        ' project: CORDEX, time_frequency : mon,variable:tas, experiment:historical ')
    response = wps.get(
        service='WPS',
        request='Execute',
        version='1.0.0',
        identifier='esgsearch',
        datainputs=inputs)
    assert_response_success(response)
@pytest.mark.online
def test_dataset_out_of_limit():
    """A limit/offset window past the provider's range still returns success."""
    wps_client = client_for(Service(processes=[ESGSearchProcess()]))
    template = "url={};search_type={};limit={};offset={};constraints={}"
    request_inputs = template.format(
        'http://esgf-data.dkrz.de/esg-search',
        'Dataset', '100', '99',
        'project:CORDEX,time_frequency:mon,variable:tas,experiment:historical')
    wps_response = wps_client.get(
        service='WPS', request='Execute', version='1.0.0',
        identifier='esgsearch', datainputs=request_inputs)
    assert_response_success(wps_response)
@pytest.mark.online
def test_dataset_out_of_offset():
    """An offset far beyond the result count still yields a success response."""
    wps = client_for(Service(processes=[ESGSearchProcess()]))
    template = "url={};search_type={};limit={};offset={};constraints={}"
    inputs = template.format(
        'http://esgf-data.dkrz.de/esg-search',
        'Dataset', '1', '1000',
        'project:CORDEX,time_frequency:mon,variable:tas,experiment:historical')
    response = wps.get(
        service='WPS', request='Execute', version='1.0.0',
        identifier='esgsearch', datainputs=inputs)
    assert_response_success(response)
@pytest.mark.online
def test_dataset_latest():
    """Searching with latest=False (all versions) should succeed."""
    wps = client_for(Service(processes=[ESGSearchProcess()]))
    inputs = "url={};search_type={};limit={};offset={};constraints={};latest={}".format(
        'http://esgf-data.dkrz.de/esg-search',
        'Dataset', '100', '0',
        'project:CORDEX,time_frequency:mon,variable:tas,experiment:historical',
        'False')
    response = wps.get(
        service='WPS',
        request='Execute',
        version='1.0.0',
        identifier='esgsearch',
        datainputs=inputs)
    assert_response_success(response)
@pytest.mark.online
def test_dataset_query():
    """A free-text query combined with a constraint should succeed."""
    wps_client = client_for(Service(processes=[ESGSearchProcess()]))
    template = "url={};search_type={};limit={};offset={};constraints={};query={}"
    request_inputs = template.format(
        'http://esgf-data.dkrz.de/esg-search',
        'Dataset', '1', '0',
        'project:CORDEX',
        'geopotential')
    wps_response = wps_client.get(
        service='WPS', request='Execute', version='1.0.0',
        identifier='esgsearch', datainputs=request_inputs)
    assert_response_success(wps_response)
@pytest.mark.online
def test_aggregation():
    """An Aggregation-type search should return a success response."""
    wps = client_for(Service(processes=[ESGSearchProcess()]))
    inputs = "url={};search_type={};limit={};offset={};constraints={}".format(
        'http://esgf-data.dkrz.de/esg-search',
        'Aggregation', '5', '20',
        'project:CORDEX,time_frequency:mon,variable:tas,experiment:historical')
    response = wps.get(
        service='WPS',
        request='Execute',
        version='1.0.0',
        identifier='esgsearch',
        datainputs=inputs)
    assert_response_success(response)
@pytest.mark.online
def test_file():
    """A File-type search should return a success response."""
    wps_client = client_for(Service(processes=[ESGSearchProcess()]))
    template = "url={};search_type={};limit={};offset={};constraints={}"
    request_inputs = template.format(
        'http://esgf-data.dkrz.de/esg-search',
        'File', '1', '30',
        'project:CORDEX,time_frequency:mon,variable:tas,experiment:historical')
    wps_response = wps_client.get(
        service='WPS', request='Execute', version='1.0.0',
        identifier='esgsearch', datainputs=request_inputs)
    assert_response_success(wps_response)
| StarcoderdataPython |
5093303 | #!/usr/bin/env python
# https://bugs.python.org/issue6634
import os
import sys
import time
import threading as mt
def work():
    """Thread target demonstrating bpo-6634: sys.exit() from a thread.

    Raises SystemExit in the *calling thread only*; it does not stop the
    interpreter when run inside a non-main thread.
    """
    # BUG FIX: original used a Python 2 print statement (SyntaxError on py3).
    print('exit now')
    # NOTE: This should call the python interpreter to exit.
    # This is not the case, only the thread is terminated.
    sys.exit()
def test():
    """Start work() in a daemon thread, then keep the main thread alive.

    The sleep demonstrates that the interpreter keeps running even after the
    child thread calls sys.exit() (see bpo-6634 referenced above).
    """
    child = mt.Thread(target=work)
    # daemon thread: does not block interpreter shutdown on its own
    child.daemon = True
    child.start()
    time.sleep(100)
if __name__ == '__main__':
    # Despite work() calling sys.exit(), only that thread terminates; the
    # process keeps running through the sleep in test().
    test()
| StarcoderdataPython |
1981346 | <reponame>jwparktom/convit
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import json
import random
from torchvision import datasets, transforms
from torchvision.datasets.folder import ImageFolder, DatasetFolder, default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import create_transform
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
# File extensions accepted as images (compared case-insensitively in
# has_file_allowed_extension below).
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
    """Return True when *filename* ends (case-insensitively) with one of *extensions*."""
    lowered = filename.lower()
    return lowered.endswith(extensions)
def make_subsampled_dataset(
        directory, class_to_idx, extensions=None, is_valid_file=None, sampling_ratio=1., nb_classes=None):
    """Scan an ImageFolder-style tree and return a list of (path, class_index).

    Per class, at most ``int(num_entries * sampling_ratio)`` files are kept,
    taken in sorted os.walk order.  If ``nb_classes`` is given, only the
    first ``nb_classes`` classes (sorted by name) are scanned.
    Exactly one of ``extensions`` / ``is_valid_file`` must be supplied.
    """
    instances = []
    directory = os.path.expanduser(directory)
    # exactly one of `extensions` / `is_valid_file` must be provided
    both_none = extensions is None and is_valid_file is None
    both_something = extensions is not None and is_valid_file is not None
    if both_none or both_something:
        raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
    if extensions is not None:
        # derive a validity predicate from the extension whitelist
        def is_valid_file(x: str) -> bool:
            return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
    is_valid_file = cast(Callable[[str], bool], is_valid_file)
    for i, target_class in enumerate(sorted(class_to_idx.keys())):
        # optional cap on the number of classes scanned
        if nb_classes is not None and i >= nb_classes:
            break
        class_index = class_to_idx[target_class]
        target_dir = os.path.join(directory, target_class)
        if not os.path.isdir(target_dir):
            continue
        # per-class quota; NOTE(review): os.listdir counts *all* entries,
        # including non-image files/subdirs, so the quota is approximate
        num_imgs = int(len(os.listdir(target_dir))*sampling_ratio)
        imgs = 0
        for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
            for fname in sorted(fnames):
                if imgs == num_imgs:
                    break
                path = os.path.join(root, fname)
                if is_valid_file(path):
                    item = path, class_index
                    instances.append(item)
                    imgs += 1
    return instances
class INatDataset(ImageFolder):
    """iNaturalist dataset built from the year's annotation JSON files.

    ``self.samples`` is populated manually; targets are re-indexed
    contiguously by the chosen ``category`` field (e.g. 'name', 'genus').
    NOTE(review): ImageFolder.__init__ is never called; attributes are set
    by hand instead — confirm no inherited state is missing.
    """
    def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
                 category='name', loader=default_loader):
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        self.year = year
        # assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']
        path_json = os.path.join(root, f'{"train" if train else "val"}{year}.json')
        with open(path_json) as json_file:
            data = json.load(json_file)
        with open(os.path.join(root, 'categories.json')) as json_file:
            data_catg = json.load(json_file)
        # the *train* split defines the class indexing, so that val reuses
        # exactly the same target ids as train
        path_json_for_targeter = os.path.join(root, f"train{year}.json")
        with open(path_json_for_targeter) as json_file:
            data_for_targeter = json.load(json_file)
        # map category value -> contiguous index, in annotation order
        targeter = {}
        indexer = 0
        for elem in data_for_targeter['annotations']:
            king = []
            king.append(data_catg[int(elem['category_id'])][category])
            if king[0] not in targeter.keys():
                targeter[king[0]] = indexer
                indexer += 1
        self.nb_classes = len(targeter)
        self.samples = []
        for elem in data['images']:
            # file_name split on '/'; NOTE(review): assumes cut[2] is the
            # numeric category id and cut[3] the file name — confirm layout
            cut = elem['file_name'].split('/')
            target_current = int(cut[2])
            path_current = os.path.join(root, cut[0], cut[2], cut[3])
            categors = data_catg[target_current]
            target_current_true = targeter[categors[category]]
            self.samples.append((path_current, target_current_true))
    # __getitem__ and __len__ inherited from ImageFolder
class SubsampledDatasetFolder(DatasetFolder):
    """DatasetFolder variant whose sample list is built by make_subsampled_dataset."""
    def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, is_valid_file=None, sampling_ratio=1., nb_classes=None):
        # Deliberately calls super(DatasetFolder, self), i.e. DatasetFolder's
        # *base* class, bypassing DatasetFolder.__init__ so its default
        # (non-subsampled) directory scan never runs.
        super(DatasetFolder, self).__init__(root, transform=transform,
                                            target_transform=target_transform)
        classes, class_to_idx = self._find_classes(self.root)
        samples = make_subsampled_dataset(self.root, class_to_idx, extensions, is_valid_file, sampling_ratio=sampling_ratio, nb_classes=nb_classes)
        if len(samples) == 0:
            msg = "Found 0 files in subfolders of: {}\n".format(self.root)
            if extensions is not None:
                msg += "Supported extensions are: {}".format(",".join(extensions))
            raise RuntimeError(msg)
        self.loader = loader
        self.extensions = extensions
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        # targets mirror the class index of each sample
        self.targets = [s[1] for s in samples]
    # __getitem__ and __len__ inherited from DatasetFolder
class ImageNetDataset(SubsampledDatasetFolder):
    """ImageNet-style folder dataset with per-class subsampling."""
    def __init__(self, root, loader=default_loader, is_valid_file=None, **kwargs):
        # Pass IMG_EXTENSIONS only when no custom validity predicate is
        # supplied (the two options are mutually exclusive downstream).
        super(ImageNetDataset, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,
                                              is_valid_file=is_valid_file, **kwargs)
        # alias kept for torchvision ImageFolder compatibility
        self.imgs = self.samples
def build_dataset(is_train, args):
    """Build the dataset named by ``args.data_set`` and return (dataset, nb_classes).

    Supported values: CIFAR10, CIFAR100, IMNET, INAT, INAT19.
    Raises ValueError for any other name (the original fell through and hit
    a NameError on the return statement instead).
    """
    transform = build_transform(is_train, args)
    # One consistent if/elif chain (the original mixed `if` and `elif`,
    # which worked only by accident since the branches are exclusive).
    if args.data_set == 'CIFAR10':
        args.data_path = "/datasets01/cifar-pytorch/11222017/"
        dataset = datasets.CIFAR10(args.data_path, train=is_train, transform=transform)
        nb_classes = 10
    elif args.data_set == 'CIFAR100':
        args.data_path = "/datasets01/cifar100/022818/data/"
        dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
        nb_classes = 100
    elif args.data_set == 'IMNET':
        root = os.path.join(args.data_path, 'train' if is_train else 'val')
        # subsample only the training split; evaluation always uses all data
        dataset = ImageNetDataset(root, transform=transform,
                                  sampling_ratio=(args.sampling_ratio if is_train else 1.), nb_classes=args.nb_classes)
        nb_classes = args.nb_classes if args.nb_classes is not None else 1000
    elif args.data_set == 'INAT':
        dataset = INatDataset(args.data_path, train=is_train, year=2018,
                              category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    elif args.data_set == 'INAT19':
        args.data_path = "/datasets01/inaturalist/090619/"
        dataset = INatDataset(args.data_path, train=is_train, year=2019,
                              category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    else:
        raise ValueError(f"Unknown dataset: {args.data_set!r}")
    return dataset, nb_classes
def build_transform(is_train, args):
    """Build the torchvision/timm transform pipeline for train or eval.

    Training uses timm's create_transform (augmentation per args); eval
    resizes by the standard 256/224 ratio, center-crops, and normalizes.
    """
    # images <= 32px (e.g. CIFAR) are not resized; they get RandomCrop instead
    resize_im = args.input_size > 32
    if is_train:
        # this should always dispatch to transforms_imagenet_train
        transform = create_transform(
            input_size=args.input_size,
            is_training=True,
            color_jitter=args.color_jitter,
            auto_augment=args.aa,
            interpolation=args.train_interpolation,
            re_prob=args.reprob,
            re_mode=args.remode,
            re_count=args.recount,
        )
        if not resize_im:
            # replace RandomResizedCropAndInterpolation with
            # RandomCrop
            transform.transforms[0] = transforms.RandomCrop(
                args.input_size, padding=4)
        return transform
    # evaluation pipeline
    t = []
    if resize_im:
        size = int((256 / 224) * args.input_size)
        t.append(
            transforms.Resize(size, interpolation=3),  # to maintain same ratio w.r.t. 224 images
        )
    t.append(transforms.CenterCrop(args.input_size))
    t.append(transforms.ToTensor())
    t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
    return transforms.Compose(t)
| StarcoderdataPython |
4952749 | from flask import request, Blueprint
from cusg.utils.http import error_response
from cusg.events.factorys import event_handler_for
from cusg.utils.permissions import restricted
# Blueprint collecting all instruction-document endpoints.
instruction_document_blueprint = Blueprint('instruction_document', __name__)
@instruction_document_blueprint.errorhandler(Exception)
def handle_error(error: Exception):
    """Convert any unhandled exception raised in this blueprint into an error response."""
    return error_response(error)
@instruction_document_blueprint.route('/add-doc', methods=('POST',))
@restricted(['ADMIN'])
def add_doc():
    """Create a new instruction document (ADMIN only); dispatched via the event handler."""
    return event_handler_for(request).get_response()
@instruction_document_blueprint.route('/delete-doc', methods=('POST',))
@restricted(['ADMIN'])
def delete_doc():
    """Delete an instruction document (ADMIN only)."""
    return event_handler_for(request).get_response()
@instruction_document_blueprint.route('/update-doc', methods=('POST',))
@restricted(['ADMIN'])
def update_doc():
    """Update an instruction document (ADMIN only)."""
    return event_handler_for(request).get_response()
@instruction_document_blueprint.route('/add-page', methods=('POST',))
@restricted(['ADMIN'])
def add_page():
    """Create a new instruction document page (ADMIN only)."""
    return event_handler_for(request).get_response()
@instruction_document_blueprint.route('/update-page', methods=('POST',))
@restricted(['ADMIN'])
def update_page():
    """Update an instruction document page (ADMIN only)."""
    return event_handler_for(request).get_response()
@instruction_document_blueprint.route('/delete-page', methods=('POST',))
@restricted(['ADMIN'])
def delete_page():
    """Delete an instruction document page (ADMIN only)."""
    return event_handler_for(request).get_response()
@instruction_document_blueprint.route('/list-docs', methods=('POST',))
def list_docs():
    """List instruction documents (no role restriction)."""
    return event_handler_for(request).get_response()
@instruction_document_blueprint.route('/search-docs', methods=('POST',))
def search_docs():
    """Search instruction documents (docstring corrected from copy-pasted 'List')."""
    return event_handler_for(request).get_response()
@instruction_document_blueprint.route('/get-doc', methods=('POST',))
def get_doc():
    """Fetch a single instruction document (route `/get-doc`)."""
    return event_handler_for(request).get_response()
| StarcoderdataPython |
def grader(score):
    """Translate a fractional score into a letter grade.

    Thresholds (preserved from the original): scores above 1 or below 0.6
    are 'F'; (0.8, 1] -> 'A'; (0.7, 0.8] -> 'B'; (0.6, 0.7] -> 'C'; and
    exactly 0.6 is the only value that earns a 'D'.
    """
    out_of_range = score > 1 or score < 0.6
    if out_of_range:
        return 'F'
    if 0.8 < score <= 1:
        return 'A'
    if 0.7 < score <= 0.8:
        return 'B'
    if 0.6 < score <= 0.7:
        return 'C'
    # Only score == 0.6 can reach this point.
    return 'D'
6538416 | import argparse
from datetime import datetime
import csv
import pdfplumber
# headers in the PDF that we'll target
INCOLS = ['COUNTY', 'GENDER', 'ACN', 'APV', 'DEM',
          'GRN', 'LBR', 'REP', 'UAF', 'UNI']
# headers for the CSV
OUTCOLS = ['report_date', 'county', 'gender', 'party', 'returned_votes']


def table_parser(table, report_date=None):
    """Melt a county/gender PDF table into tidy per-party records.

    Given a list of rows (each: county, gender, then one count per party in
    INCOLS order), return a list of dicts keyed by OUTCOLS.  County names
    appear only on the first row of each county group and are carried
    forward to subsequent rows.

    report_date: ISO date string stamped on every record.  Defaults to the
    module-level ``report_date`` set in the __main__ block — the original
    function read that global implicitly, which made it untestable and
    broke any import-time use; the parameter keeps callers working while
    making the dependency explicit.
    """
    if report_date is None:
        # backward-compatible fallback to the global set under __main__
        report_date = globals().get('report_date')
    # list to dump data into
    outlist = []
    # placeholder for the county, carried forward across rows
    county = None
    for row in table:
        # skip header rows
        if 'COUNTY' in row[0]:
            continue
        # a non-empty first cell starts a new county group
        if row[0]:
            county = row[0]
        # gender is in the second cell
        gender = row[1]
        # one output record per party column
        for i, col in enumerate(INCOLS):
            if col.upper() in ['COUNTY', 'GENDER']:
                continue
            party = col
            # strip thousands separators and coerce to int
            count = int(row[i].replace(',', ''))
            record = dict(zip(OUTCOLS, [report_date, county, gender,
                                        party, count]))
            outlist.append(record)
    return outlist
if __name__ == '__main__':
    # load up that parser, baby
    parser = argparse.ArgumentParser()
    parser.add_argument('--pdf', help='Path to CO early vote totals PDF')
    args = parser.parse_args()
    # get reference to the PDF
    pdf_in = args.pdf
    # assuming a common filename structure, e.g.
    # '20181026BallotsReturnedByAgePartyGender.pdf'
    # pull out the date
    rawdate = pdf_in.split('/')[-1].split('Ballot')[0]
    report_date = datetime.strptime(rawdate, '%Y%m%d').date().isoformat()
    # name the CSV file to write to
    csv_out = '{}-co-early-vote-totals.csv'.format(report_date)
    # open the PDF file and the CSV to write to
    # BUG FIX: csv output files must be opened with newline='' so
    # csv.writer does not emit blank rows on Windows (csv module docs)
    with pdfplumber.open(pdf_in) as pdf, \
            open(csv_out, 'w', newline='') as outfile:
        # create a writer object and emit the header row
        writer = csv.DictWriter(outfile, fieldnames=OUTCOLS)
        writer.writeheader()
        # raw rows accumulated across all pages
        early_voting_data = []
        for page in pdf.pages:
            # grab the table on the page
            table = page.extract_table()
            # the two tables on page 1 actually come in as one table,
            # so we need to chop out the bits from the summary table
            if page.page_number == 1:
                # find where the ~real~ table starts
                starting_idx = None
                for i, row in enumerate(table):
                    # the line starting with 'COUNTY' marks the real table
                    if 'COUNTY' in row[0]:
                        starting_idx = i
                        break
                # keep everything from that point onward
                table = table[starting_idx:]
            # x[:-1] drops the 'Grand Total' value at the end of every
            # line; also skip the useless "Voter Party" lines at the top
            # of each table and the county summary ("TOTAL") rows
            clean_table = [x[:-1] for x in table if x and 'VOTER\xa0PARTY'
                           not in x[0] and 'TOTAL' not in x[0].upper()]
            early_voting_data.extend(clean_table)
        # melt the raw rows into tidy records and write them out
        parsed_data = table_parser(early_voting_data)
        writer.writerows(parsed_data)
| StarcoderdataPython |
1832134 | <filename>cartpole_control/cartpole_control/controllers.py
"""
controllers.py
Controllers for cartpole swingup and balancing
Two methods: MPC and Energy-shaping
Publishes effort (force) command at specified rate
Implement handlers for state messages
"""
import math
import time
import matplotlib.pyplot as plt
import numpy as np
import rclpy
from gekko import GEKKO as gk
from rclpy.node import Node
from sensor_msgs.msg import JointState
from std_msgs.msg import Float64
from cartpole_interfaces.srv import SetEnergyGains, SetLQRGains
"""
CartPoleSwingUpController
Base class for controllers
"""
class CartPoleSwingUpController(Node):
def __init__(self, node_name, gravity=9.81,
mass_cart=1,
mass_pole=1,
length_pole=0.22,
k_lqr=np.array([-3.2361, 90.2449, -3.6869, 5.4608])):
super().__init__(node_name)
self.state = np.zeros((4,1))
self.k_lqr = k_lqr
self.gravity = gravity
self.mass_cart = mass_cart
self.mass_pole = mass_pole
self.length_pole = length_pole
self.k_lqr_service = self.create_service(SetLQRGains, 'set_lqr_gains', self.lqr_gains_callback)
def lqr_gains_callback(self, request, response):
self.k_lqr = np.array([request.k_x,
request.k_theta,
request.k_x_dot,
request.k_theta_dot])
self.get_logger().info('LQR gains set to: {}'.format(self.k_lqr))
response.result = 1
def unpack_parameters(self):
return (self.gravity,
self.mass_pole,
self.mass_cart,
self.length_pole,
self.k_lqr)
"""
state_estimate_callback
Receives messages from subscribed topic and make adjustments
In particular, PyBullet state publisher plugin flips some signs,
we need to account for this.
"""
def state_estimate_callback(self, msg):
self.state[0] = -msg.position[1]
self.state[1] = -msg.velocity[1]
self.state[2] = msg.position[0]
self.state[3] = msg.velocity[0]
return self.state
"""
control_callback
publishes effort command to topic
"""
def control_callback(self):
effort = Float64()
force = float(self.get_action())
effort.data = force
self.publisher.publish(effort)
"""
get_action
Calls appropriate controller (swingup / LQR) depending on current state
"""
def get_action(self):
if (abs(self.theta_distance(self.state[2],math.pi)) < .3):
return float(self.upright_lqr())
else:
return float(self.swingup())
"""
theta_distance
Compute the signed angle difference between theta and target
"""
def theta_distance(self, theta, target):
return math.atan2(math.sin(theta-target), math.cos(theta-target))
"""
upright_lqr
LQR balancing controller
Elementwise multiplication of predetermined feedback on state errors
See control-examples/cart-pole/generate_lqr.m for more info on gain computation
"""
def upright_lqr(self):
k_lqr = self.k_lqr
x = self.state[0]
x_dot = self.state[1]
theta = self.state[2]
theta_dot = self.state[3]
theta_diff = self.theta_distance(theta,math.pi)
X = np.array([x[0], theta_diff, x_dot[0], theta_dot[0
]])
f = np.dot(k_lqr,X)
return f
def swingup(self):
raise NotImplementedError
"""
CartPoleMPCController
Implements MPC for cartpole using Gekko
"""
class CartPoleMPCController(CartPoleSwingUpController):
def __init__(self):
super().__init__('cartpole_mpc_controller')
self.loop_rate = 10.0
self.position_topic = '/slider_cart_position_controller/command'
self.velocity_topic = '/slider_cart_velocity_controller/command'
self.effort_topic = '/slider_cart_effort_controller/command'
self.publisher = self.create_publisher(Float64,
self.effort_topic,
10)
self.joint_state_topic = '/joint_states'
self.subscriber = self.create_subscription(JointState,
self.joint_state_topic,
self.state_estimate_callback,
10)
self.timer = self.create_timer(1.0/self.loop_rate, self.control_callback)
self.m = gk(remote=False)
# Number of knot points
N = 12
T = 1
# Define time mesh, oversample points closer to present
self.m.time = np.linspace(0,T,N)**2
p = np.zeros(N)
p[-1] = T
final = self.m.Param(value=p)
m1 = self.mass_cart
m2 = self.mass_pole
l = self.length_pole
g = self.gravity
# Specify constraints
x_0 = [0.0,0.0,0.0,0.0]
x_f = [0.5, math.pi, 0.0, 0.0]
pos_lb = -1.0
pos_ub = 1.0
Fmx = 100.0
self.x = self.m.Array(self.m.Var,(4))
self.x[0].lower = pos_lb
self.x[0].upper = pos_ub
for i in range(4):
self.x[i].value = x_0[i]
self.u = self.m.MV(value=0,lb=-Fmx,ub=Fmx)
self.u.STATUS = 1
self.m.Equation(self.x[0].dt() == self.x[2])
self.m.Equation(self.x[1].dt() == self.x[3])
self.m.Equation(self.x[2].dt() == ((l*m2*self.m.sin(self.x[1])*self.x[3]**2 + g*m2*self.m.cos(self.x[1])*self.m.sin(self.x[1]) + self.u)/
(m1+m2*(1-self.m.cos(self.x[1])**2))))
self.m.Equation(self.x[3].dt() == -((l*m2*self.m.cos(self.x[1])*self.m.sin(self.x[1])*self.x[3]**2+self.u*self.m.cos(self.x[1])+(m1+m2)*g*self.m.sin(self.x[1])) /
(l*m1+l*m2*(1-self.m.cos(self.x[1])**2))))
#self.m.Equation((self.x[1]*final - x_f[1])**2 - 0.1 <= 0)
# Optimization Objectives
self.m.Minimize(self.m.integral(self.u**2))
self.m.Minimize(1e2*(self.x[0]*final-x_f[0])**2*final)
self.m.Minimize(1e3*(self.x[1]-x_f[1])**2*final)
#self.m.Minimize(1e5*(self.x[2]*final-x_f[2])**2*final)
#self.m.Minimize(1e5*(self.x[3]*final-x_f[3])**2*final)
self.m.options.IMODE = 6
self.current_action = 0
self.fail_count = 0
self.p, = plt.plot([],[], 'r-')
plt.title('MPC state trajectory')
plt.xlabel('x position')
plt.ylabel('theta')
def update_line(self, new_data):
x_data = new_data[0]
y_data = new_data[1]
self.p.set_xdata(x_data)
self.p.set_ydata(y_data)
plt.draw()
plt.ylim((-5,5))
plt.xlim((-2,2))
plt.pause(0.000001)
"""
swingup
MPC swingup - solve for plusible trajectory
"""
def swingup(self):
try:
self.x[0].value = self.state[0]
self.x[1].value = self.state[2]
self.x[2].value = self.state[1]
self.x[3].value = self.state[3]
self.m.solve()
except:
print('MPC fail')
self.fail_count += 1
if self.fail_count > 5:
raise ValueError('MPC failed to solve for trajectory.. falling back')
return np.array([self.current_action])
self.fail_count = 0
#self.update_line(np.array([self.x[0].value,self.x[1].value]))
self.current_action = self.u.value[1]
return self.current_action
"""
CartPoleEnergyShapingController
Energy shaping controller
k_e: Energy gain, determines how hard the cart tries to swing pole
k_x: [Position, Linear velocity] gains, higher values will regularize cart
and prevent excessive swinging
"""
class CartPoleEnergyShapingController(CartPoleSwingUpController):
def __init__(self, k_e=14, k_x=[1,2]):
super().__init__('cartpole_energy_shaping_controller')
self.loop_rate = 40.0
self.position_topic = '/slider_cart_position_controller/command'
self.velocity_topic = '/slider_cart_velocity_controller/command'
self.effort_topic = '/slider_cart_effort_controller/command'
self.publisher = self.create_publisher(Float64,
self.effort_topic,
10)
self.joint_state_topic = '/joint_states'
self.subscriber = self.create_subscription(JointState,
self.joint_state_topic,
self.state_estimate_callback,
10)
self.timer = self.create_timer(1.0/self.loop_rate, self.control_callback)
self.k_e = k_e
self.k_x = k_x
self.k_e_service = self.create_service(SetEnergyGains, 'set_energy_gains', self.energy_gains_callback)
def energy_gains_callback(self, request, response):
self.k_e = request.k_e
self.k_x = [request.k_x, request.k_x_dot]
self.get_logger().info('Energy gain set to: {}'.format(self.k_e))
self.get_logger().info('Position gain set to: {}'.format(self.k_x[0]))
self.get_logger().info('Velocity gain set to: {}'.format(self.k_x[1]))
response.result = int(1)
# TODO Fix response typeerror
def unpack_parameters(self):
k_e = self.k_e
k_x = self.k_x
return super().unpack_parameters() + (k_e, k_x)
"""
energy
Total energy of the pendulum (potential + kinetic)
Assumes the pendulum is a point mass attached by light rod
"""
def energy(self):
(gravity, mass_pole,
mass_cart,
length_pole,
k_lqr,
k_e,
k_x) = self.unpack_parameters()
theta = self.state[2]
theta_dot = self.state[3]
U = -mass_pole * gravity * length_pole * math.cos(theta)
E = 0.5 * (mass_pole * (length_pole ** 2)) * theta_dot ** 2 + U
return E
"""
swingup
Energy-shaping swingup: Use difference of current energy and potential energy of
upright pole to determine force command
"""
def swingup(self):
(gravity, mass_pole,
mass_cart,
length_pole,
k_lqr,
k_e,
k_x,) = self.unpack_parameters()
Ed = mass_pole*gravity*length_pole
Ediff = self.energy() - Ed
x = self.state[0]
x_dot = self.state[1]
theta = self.state[2]
theta_dot = self.state[3]
c = math.cos(theta)
s = math.sin(theta)
acceleration = k_e * theta_dot * c * Ediff - k_x[0] * x - k_x[1]*x_dot
f = ((mass_pole+mass_cart)*acceleration +
mass_pole*(-acceleration*c-gravity*s)*c -
mass_pole*length_pole*theta_dot**2*s)
return -f
| StarcoderdataPython |
# Classification-mode identifiers.
BINARY_MODE = 'binary'
MULTICLASS_MODE = 'multiclass'
MULTILABEL_MODE = 'multilabel'
# String flags — presumably encode a fill/sampling strategy and a
# missing-data marker; confirm semantics against the callers.
NN_FILL_DOWNSAMPLE = '0'
NN_FILL_UPSAMPLE = '1'
MISSING_DATA_FLAG = '2'
| StarcoderdataPython |
1921625 | <gh_stars>0
# __ __ ____ __
# / /____ _ ____ ___ ____ _ / / / __ ) ____ / /_
# __ / // __ `// __ `__ \ / __ `// / / __ |/ __ \ / __/
# / /_/ // /_/ // / / / / // /_/ // / / /_/ // /_/ // /_
# \____/ \__,_//_/ /_/ /_/ \__,_//_/______/_____/ \____/ \__/
# /_____/
import asyncio
import logging
import pytz
import discord
import jamal_bot_config
import jamal_bot_database
from datetime import datetime
from discord.ext import commands
from mcstatus import MinecraftServer
# Recommended logging setup from the discord.py documentation.
logging.basicConfig(level=logging.INFO)
# Mirror the discord library's log output to jamal_bot.log (truncated on
# each start because of mode='w').
logger = logging.getLogger('discord')
logger.setLevel(logging.INFO)
handler = logging.FileHandler(
    filename='jamal_bot.log', encoding='utf-8', mode='w')
handler.setFormatter(
    logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
# in summary allow users to @mention the bot and use three different cased
# variations of "jamal " with a space
def get_prefix(client, message):
    """Return the accepted command prefixes: the bot @mention or 'jamal ' variants."""
    prefixes = ['jamal ', 'Jamal ', 'JAMAL ']
    return commands.when_mentioned_or(*prefixes)(client, message)
# only creates the database if it doesn't exist
jamal_bot_database.create_db('jamal_bot_quotes.db')
# requires "jamal " (with a trailing space) in order to trigger commands
jamal_bot = commands.Bot(command_prefix=get_prefix,
                         case_insensitive=True,
                         owner_id=jamal_bot_config.user_config['OWNER_ID'])
# remove the default help command so the custom `jamal help` below is used
jamal_bot.remove_command('help')
# jamal connection to discord api
@jamal_bot.event
async def on_ready():
    """Log the bot's identity and set its presence once connected to Discord."""
    print(f'\nLogged in as: {jamal_bot.user.name} - {jamal_bot.user.id}')
    print('Discord.py Version:', discord.__version__)
    activity = discord.Game(name='Warframe')
    await jamal_bot.change_presence(status=discord.Status.online,
                                    activity=activity)
    # Printing done let's pterodactyl know that it's ready
    print('Done')
# ignore in DMs
@jamal_bot.check
async def globally_block_dms(ctx):
    """Global check: only allow commands issued from guild channels (no DMs)."""
    return ctx.guild is not None
# jamal error handling
@jamal_bot.event
async def on_command_error(ctx, error):
    """Reply with a usage hint when a command is missing a required argument."""
    if isinstance(error, commands.MissingRequiredArgument):
        await ctx.send('Missing required argument, try `jamal help` for help')
# jamal list
# ignore in DMs
@jamal_bot.command()
async def list(ctx):
    """Send every name available in the quote database."""
    # NOTE: shadows the builtin `list`, but only as a command callback name
    await ctx.send(f'{jamal_bot_database.get_names()}')
# jamal access {name}
# set name to lowercase as that's how I like setting it up in the database
@jamal_bot.command()
async def access(ctx, name):
    """Send a random quote from *name*, if present in the database."""
    name = name.lower()
    if jamal_bot_database.check_name(name) is True:
        await ctx.send(f'{jamal_bot_database.get_quote(name)}')
    else:
        await ctx.send(f'"{name}" is not in the database')
# jamal add name|quote {name} {*args}
# must have admin role in order to add quotes
# admin role must be defined in config.yml
@jamal_bot.command()
@commands.has_any_role(jamal_bot_config.user_config['ADMIN_ROLE_ID'])
async def add(ctx, opt, name, *args):
    """Add a name or a quote to the database (admin only).

    Usage: ``jamal add name <name>`` or ``jamal add quote <name> <words...>``.
    """
    opt = opt.lower()
    name = name.lower()
    if opt == "name":
        if jamal_bot_database.check_name(name) is True:
            await ctx.send(f'"{name}" is already in the database')
        else:
            jamal_bot_database.add_name(name)
            await ctx.send(
                f'{ctx.message.author.mention} has added '
                f'"{name}" to the database')
    elif opt == "quote":
        if jamal_bot_database.check_name(name) is False:
            await ctx.send(f'"{name}" is not in the database')
        else:
            # re-join the raw words into a single quote string
            words = []
            for arg in args:
                words.append(arg)
            quote = " ".join(words)
            if quote == "":
                await ctx.send('Quote cannot be empty, '
                               'try `jamal help` for help')
            else:
                jamal_bot_database.add_quote(name, quote)
                await ctx.send(
                    f'{ctx.message.author.mention} has added "{quote}" '
                    f'to {name}')
    else:
        # NOTE(review): raises the exception *class* with no arguments;
        # commands.MissingRequiredArgument normally takes the missing
        # Parameter — confirm this does not itself raise a TypeError.
        raise discord.ext.commands.MissingRequiredArgument
# jamal remove {name}
# ignore in DMs
# must have admin role in order to remove names
@jamal_bot.command()
@commands.has_any_role(jamal_bot_config.user_config['ADMIN_ROLE_ID'])
async def remove(ctx, opt, name):
    """Remove a name from the database (admin only).

    Usage: ``jamal remove name <name>``.
    """
    opt = opt.lower()
    name = name.lower()
    if opt == "name":
        if jamal_bot_database.check_name(name) is False:
            await ctx.send(f'"{name}" is not in the database')
        else:
            jamal_bot_database.remove_name(name)
            # BUG FIX: the second literal was missing its f prefix, so the
            # message showed the literal text "{name}" instead of the name.
            await ctx.send(
                f'{ctx.message.author.mention} has removed '
                f'"{name}" from the database')
    else:
        # NOTE(review): raises the exception *class* with no arguments;
        # commands.MissingRequiredArgument normally takes the missing
        # Parameter — confirm this does not itself raise a TypeError.
        raise discord.ext.commands.MissingRequiredArgument
# jamal quotes
# random quote game command
# bot sends a random quote and reads the next message as the guess
# waits 6 seconds for a guess before it times out
@jamal_bot.command()
async def quotes(ctx, pass_context=True):
    """Quote guessing game: post a random quote and check the next message."""
    name = jamal_bot_database.random_name()
    await ctx.send(f'who said "{jamal_bot_database.get_quote(name)}"')
    try:
        guess = await jamal_bot.wait_for('message', timeout=6.0)
    except asyncio.TimeoutError:
        return await ctx.channel.send(
            f'you\'re taking too long, it was {name}')
    # case-insensitive comparison against the stored (lowercase) name
    if (str(guess.content)).lower() == name:
        await ctx.channel.send('you got em')
    else:
        await ctx.channel.send(f'WRONG! it\'s {name}')
# jamal status {server_address}
# {server_address} is optional
# ignore in DMs
@jamal_bot.command()
async def status(ctx, server_address=jamal_bot_config.user_config[
        'DEFAULT_SERVER_ADDRESS']):
    """Query a Minecraft server and reply with an embed of its status.

    Falls back to the configured DEFAULT_SERVER_ADDRESS when none is given.
    """
    server = MinecraftServer.lookup(server_address)
    try:
        status = server.status()
        server_latency = round(status.latency, 2)
        status_embed = discord.Embed(
            title=server_address,
            description=status.version.name,
            colour=discord.Colour.green())
        status_embed.add_field(
            name='Description',
            value=f'```{status.description}```',
            inline=False)
        status_embed.add_field(
            name='Count',
            value=f'{status.players.online}/{status.players.max}',
            inline=True)
        try:
            # the query protocol exposes player names, but many servers
            # disable it — hence the nested try/except fallback
            query = server.query()
            server_players = (", ".join(query.players.names))
            status_embed.add_field(
                name="Players",
                value=f'\u200b{server_players}',
                inline=True)
            status_embed.set_footer(
                text=f'Ping: {server_latency} ms')
            await ctx.send(embed=status_embed)
        except Exception:
            # query unavailable: send the embed without the player list
            status_embed.set_footer(text=f'Ping: {server_latency} ms')
            await ctx.send(embed=status_embed)
    except Exception:
        # NOTE(review): broad except — any failure (DNS, timeout, parse)
        # is reported uniformly as "Could not contact server"
        error_embed = discord.Embed(
            title='Could not contact server',
            colour=discord.Colour.red())
        await ctx.send(embed=error_embed)
# jamal time
# bot returns an easy to read embed displaying different timezones
@jamal_bot.command()
async def time(ctx):
    """Reply with an embed showing the current time in several timezones.

    Refactored from five copy-pasted stanzas into a data-driven loop; the
    embed field labels, order, and time format are unchanged.
    """
    # (embed label, pytz timezone name) in display order
    zones = [
        ('Universal', 'UTC'),
        ('Europe/London', 'Europe/London'),
        ('US/Eastern', 'US/Eastern'),
        ('US/Central', 'US/Central'),
        ('US/Pacific', 'US/Pacific'),
    ]
    time_embed = discord.Embed(colour=discord.Colour.purple())
    time_embed.set_author(name='jamal bot time')
    for label, zone in zones:
        now = datetime.now(pytz.timezone(zone))
        time_embed.add_field(
            name=label,
            value=now.strftime('%b %d %I:%M %p (%H:%M)'),
            inline=False)
    await ctx.send(embed=time_embed)
# jamal help
# bot returns an easy to read embed explaining how to use the commands
@jamal_bot.command()
async def help(ctx):
    """Reply with an embed listing every jamal bot command and its usage."""
    # (title, usage) pairs, in the display order of the embed fields.
    entries = [
        ('Display this help message',
         'Usage: `jamal help`'),
        ('Display all available names in the database',
         'Usage: `jamal list`'),
        ('Send a random quote and guess who said it',
         'Usage: `jamal quotes`'),
        ('Send a random quote from someone',
         'Usage: `jamal access <name>`\nEx. `jamal access kevin`'),
        ('Add a name to the database',
         'Usage: `jamal add name <name>`\nEx. `jamal add name kevin`'),
        ('Add a quote to the database',
         'Usage: `jamal add quote <name> <quote>`'
         '\nEx. `jamal add quote kevin she said give me armor`'),
        ('Remove a name and their quotes from the database',
         'Usage: `jamal remove name <name>`'
         '\nEx. `jamal remove name kevin`'),
        ('Display the status of a minecraft server',
         'Usage: `jamal status [address]`'
         '\nEx. `jamal status hypixel.net`'),
        ('Display the time in different regions',
         'Usage: `jamal time`'),
    ]
    help_embed = discord.Embed(colour=discord.Colour.blurple())
    help_embed.set_author(name='jamal bot help')
    for field_name, field_value in entries:
        help_embed.add_field(name=field_name, value=field_value, inline=False)
    await ctx.send(embed=help_embed)
# Run jamal_bot_core
# Blocks until the bot disconnects; the API key comes from the user config.
# NOTE(review): the bot=True kwarg is the library default in discord.py —
# confirm it is still accepted by the installed version before removing.
jamal_bot.run(jamal_bot_config.user_config['DISCORD_API_KEY'], bot=True,
              reconnect=True)
| StarcoderdataPython |
6617015 | <gh_stars>0
from django import forms
from clientpage.models import *
class OrganizerForm(forms.ModelForm):
    """Model-backed form exposing the editable attributes of a Locations row."""

    class Meta:
        model = Locations
        fields = [
            'name', 'phonenumber', 'image', 'latitude',
            'longitude', 'address', 'description',
        ]
| StarcoderdataPython |
4837319 | <filename>opensauce/snack.py
"""F0 and formant estimation using Snack Sound Toolkit
Snack can be called in several ways:
1) On Windows, Snack can be run via a standalone binary executable
2) Snack can be called through the Python/Tkinter inteface
3) Snack can be called on the system command line through the Tcl shell
"""
# Licensed under Apache v2 (see LICENSE)
# Based on VoiceSauce files func_SnackPitch.m (authored by <NAME>),
# func_SnackFormants.m (authored by <NAME>), and
# vs_ParameterEstimation.m
from __future__ import division
from sys import platform
from subprocess import call
from conf.userconf import user_snack_lib_path
import os
import sys
import inspect
import numpy as np
import logging
log = logging.getLogger('opensauce.snack')
# Variable names for Snack formant and bandwidth vectors
sformant_names = ['sF1', 'sF2', 'sF3', 'sF4', 'sB1', 'sB2', 'sB3', 'sB4']
# Calling methods accepted by the snack_* entry points below
valid_snack_methods = ['exe', 'python', 'tcl']
def snack_pitch(wav_fn, method, data_len, frame_shift=1,
                window_size=25, max_pitch=500, min_pitch=40,
                tcl_shell_cmd=None):
    """Return F0 and voicing vectors estimated by the Snack Sound Toolkit.

    Runs the Snack ESPS pitch tracker on *wav_fn* (calling Snack via 'exe',
    'python' or 'tcl' per *method*) and NaN-pads the raw output so both
    vectors are exactly *data_len* samples long.  Snack's estimates only
    start half a window into the audio, so the first half-frame is NaN; the
    tail is NaN-filled out to data_len.

    frame_shift and window_size are in milliseconds; max_pitch/min_pitch in
    Hz.  tcl_shell_cmd is only used when method == 'tcl'.  Defaults match
    VoiceSauce.

    Returns:
        (F0, V) -- NumPy vectors of length data_len.

    See http://www.speech.kth.se/snack/man/snack2.2/tcl-man.html#spitch
    """
    F0_raw, V_raw = snack_raw_pitch(wav_fn, method, frame_shift, window_size,
                                    max_pitch, min_pitch, tcl_shell_cmd)
    # Snack's first estimate corresponds to the middle of the first window.
    head_len = np.int_(np.floor(window_size / frame_shift / 2))

    def _nan_pad(raw):
        # NaN head for the initial half-window, NaN tail out to data_len.
        head = np.full(head_len, np.nan)
        tail = np.full(data_len - (len(raw) + head_len), np.nan)
        return np.hstack((head, raw, tail))

    return _nan_pad(F0_raw), _nan_pad(V_raw)
def snack_raw_pitch(wav_fn, method, frame_shift=1, window_size=25,
                    max_pitch=500, min_pitch=40, tcl_shell_cmd=None):
    """Return raw (unpadded) F0 and voicing vectors from Snack.

    Dispatches to the implementation selected by *method* ('exe', 'python'
    or 'tcl'); see snack_pitch() for parameter semantics.  Returns the raw
    Snack output without NaN padding as (F0, V) NumPy vectors.

    Raises ValueError for an unknown method.
    """
    if method not in valid_snack_methods:
        raise ValueError('Invalid Snack calling method. Choices are {}'.format(valid_snack_methods))
    if method == 'exe':  # pragma: no cover
        return snack_raw_pitch_exe(wav_fn, frame_shift, window_size,
                                   max_pitch, min_pitch)
    if method == 'python':
        return snack_raw_pitch_python(wav_fn, frame_shift, window_size,
                                      max_pitch, min_pitch)
    # Only 'tcl' remains.
    return snack_raw_pitch_tcl(wav_fn, frame_shift, window_size,
                               max_pitch, min_pitch, tcl_shell_cmd)
def snack_raw_pitch_exe(wav_fn, frame_shift, window_size, max_pitch, min_pitch): # pragma: no cover
    """Implement snack_raw_pitch() by calling Snack through a Windows
    standalone binary executable.

    Note this method can only be used on Windows; raises ValueError on
    other platforms and OSError if snack.exe fails or leaves no .f0 file.
    Returns the raw (unpadded) F0 and voicing NumPy vectors; see
    snack_raw_pitch() for parameter semantics (times in ms, pitch in Hz).
    """
    # Check that we are on a Windows machine
    if platform != 'win32' and platform != 'cygwin':
        raise ValueError("Cannot use 'exe' as Snack calling method on non-Windows machine")
    # Call Snack using system command to run the bundled standalone executable
    exe_path = os.path.join(os.path.dirname(__file__), 'Windows', 'snack.exe')
    snack_cmd = [exe_path, 'pitch', wav_fn, '-method', 'esps']
    # Snack takes times in seconds; our arguments are in milliseconds.
    snack_cmd.extend(['-framelength', str(frame_shift / 1000)])
    snack_cmd.extend(['-windowlength', str(window_size / 1000)])
    snack_cmd.extend(['-maxpitch', str(max_pitch)])
    snack_cmd.extend(['-minpitch', str(min_pitch)])
    return_code = call(snack_cmd)
    if return_code != 0:
        raise OSError('snack.exe error')
    # snack.exe writes its results next to the input file with a .f0
    # extension.  Use splitext (not split('.')) so paths containing extra
    # dots are handled correctly; this also matches snack_raw_pitch_tcl.
    f0_fn = os.path.splitext(wav_fn)[0] + '.f0'
    # Load data from f0 file: column 0 is F0, column 1 is voicing.
    if os.path.isfile(f0_fn):
        F0_raw, V_raw = np.loadtxt(f0_fn, dtype=float, usecols=(0,1), unpack=True)
        # Cleanup and remove f0 file
        os.remove(f0_fn)
    else:
        raise OSError('snack.exe error -- unable to locate .f0 file')
    return F0_raw, V_raw
def snack_raw_pitch_python(wav_fn, frame_shift, window_size, max_pitch, min_pitch):
    """Implement snack_raw_pitch() by calling Snack through Python's tkinter
    library.

    Note this method can only be used if the user's machine is setup, so
    that Tcl/Tk can be accessed through Python's tkinter library.  Returns
    the raw (unpadded) F0 and voicing NumPy vectors; see snack_raw_pitch()
    for parameter semantics (times in ms, pitch in Hz).

    Raises ImportError when no tkinter module is available, and
    tkinter.TclError when the snack Tcl package cannot be loaded.
    """
    try:
        import tkinter
    except ImportError:
        try:
            import Tkinter as tkinter
        except ImportError: # pragma: no cover
            print("Need Python library tkinter. Is it installed?")
            # Re-raise instead of falling through: the original code only
            # printed, which produced a confusing NameError when tkinter
            # was used below.
            raise
    # HACK: Need to replace single backslash with two backslashes,
    # so that the Tcl shell reads the file path correctly on Windows
    if sys.platform == 'win32' or sys.platform == 'cygwin': # pragma: no cover
        wav_fn = wav_fn.replace('\\', '\\\\')
    # XXX I'm assuming Hz for pitch; the docs don't actually say that.
    # http://www.speech.kth.se/snack/man/snack2.2/tcl-man.html#spitch
    tcl = tkinter.Tcl()
    try:
        # XXX This will trigger a message 'cannot open /dev/mixer' on the
        # console if you don't have a /dev/mixer.  You don't *need* a mixer to
        # snack the way we are using it, but there's no practical way to
        # suppress the message without modifying the snack source.  Fortunately
        # most people running opensauce will in fact have a /dev/mixer.
        tcl.eval('package require snack')
    except tkinter.TclError as err: # pragma: no cover
        log.critical('Cannot load snack (is it installed?): %s', err)
        # Propagate the failure: returning None here would make callers
        # fail later with an opaque tuple-unpacking TypeError.
        raise
    tcl.eval('snack::sound s')
    tcl.eval('s read {}'.format(wav_fn))
    cmd = ['s pitch -method esps']
    # Snack takes times in seconds; our arguments are in milliseconds.
    cmd.extend(['-framelength {}'.format(frame_shift / 1000)])
    cmd.extend(['-windowlength {}'.format(window_size / 1000)])
    cmd.extend(['-maxpitch {}'.format(max_pitch)])
    cmd.extend(['-minpitch {}'.format(min_pitch)])
    # Run Snack pitch command
    tcl.eval('set data [{}]'.format(' '.join(cmd)))
    # XXX check for errors here and log and abort if there is one.  Result
    # string will start with ERROR:.
    # Collect results and save in return variables
    num_frames = int(tcl.eval('llength $data'))
    F0_raw = np.empty(num_frames)
    V_raw = np.empty(num_frames)
    # snack returns four values per frame, we only care about the first two.
    for i in range(num_frames):
        values = tcl.eval('lindex $data ' + str(i)).split()
        F0_raw[i] = np.float_(values[0])
        V_raw[i] = np.float_(values[1])
    return F0_raw, V_raw
def snack_raw_pitch_tcl(wav_fn, frame_shift, window_size, max_pitch, min_pitch, tcl_shell_cmd):
    """Implement snack_raw_pitch() by calling Snack through a Tcl shell.

    tcl_shell_cmd is the name of the command to invoke the Tcl shell.
    Note this method can only be used if Tcl is installed.

    Writes a temporary Tcl script next to wav_fn, runs it with
    tcl_shell_cmd, reads the resulting .f0 file, then removes both
    temporary files.  The vectors returned are the raw Snack output,
    without padding.  For more info, see documentation for
    snack_raw_pitch().
    """
    # File path for wav file provided to Tcl script
    in_file = wav_fn
    # ERROR: wind_dur parameter must be between [0.0001, 0.1].
    # ERROR: frame_step parameter must be between [1/sampling rate, 0.1].
    # invalid/inconsistent parameters -- exiting.
    # HACK: Tcl shell expects double backslashes in Windows path
    if sys.platform == 'win32' or sys.platform == 'cygwin': # pragma: no cover
        in_file = in_file.replace('\\', '\\\\')
    # Name of the file containing the Tcl script
    tcl_file = os.path.join(os.path.dirname(wav_fn), 'tclforsnackpitch.tcl')
    # Write Tcl script which will call Snack pitch calculation; the exec
    # trampoline line lets the same file restart itself under tclsh.
    f = open(tcl_file, 'w')
    script = "#!/usr/bin/env bash\n"
    script += '# the next line restarts with tclsh \\\n'
    script += 'exec {} "$0" "$@"\n\n'.format(tcl_shell_cmd)
    # HACK: The variable user_snack_lib_path is a hack we use in continuous
    # integration testing.  The reason is that we may not have the
    # permissions to copy the Snack library to the standard Tcl library
    # location.  This is a workaround to load the Snack library from a
    # different location, where the location is given by
    # user_snack_lib_path.
    if user_snack_lib_path is not None:
        script += 'pkg_mkIndex {} snack.tcl libsnack.dylib libsound.dylib\n'.format(user_snack_lib_path)
        script += 'lappend auto_path {}\n\n'.format(user_snack_lib_path)
    script += 'package require snack\n\n'
    script += 'snack::sound s\n\n'
    script += 's read {}\n\n'.format(in_file)
    # Snack writes results to <wav basename>.f0; times are converted from
    # our milliseconds to Snack's seconds below.
    script += 'set fd [open [file rootname {}].f0 w]\n'.format(in_file)
    script += 'puts $fd [join [s pitch -method esps -framelength {} -windowlength {} -maxpitch {} -minpitch {}]\n\n]\n'.format(frame_shift / 1000, window_size / 1000, max_pitch, min_pitch)
    script += 'close $fd\n\n'
    script += 'exit'
    f.write(script)
    f.close()
    # Run the Tcl script
    try:
        return_code = call([tcl_shell_cmd, tcl_file])
    except OSError:
        os.remove(tcl_file)
        raise OSError('Error while attempting to call Snack via Tcl shell. Is Tcl shell command {} correct?'.format(tcl_shell_cmd))
    else:
        if return_code != 0: # pragma: no cover
            os.remove(tcl_file)
            raise OSError('Error when trying to call Snack via Tcl shell script.')
    # Load results from the f0 file output by the Tcl script
    # And save into return variables (4 columns per frame; we keep F0 and
    # voicing from columns 0 and 1)
    f0_file = os.path.splitext(wav_fn)[0] + '.f0'
    if os.path.isfile(f0_file):
        data = np.loadtxt(f0_file, dtype=float).reshape((-1,4))
        F0_raw = data[:, 0]
        V_raw = data[:, 1]
        # Cleanup and remove f0 file
        os.remove(f0_file)
    else: # pragma: no cover
        raise OSError('Snack Tcl shell error -- unable to locate .f0 file')
    # Cleanup and remove Tcl script file
    os.remove(tcl_file)
    return F0_raw, V_raw
def snack_formants(wav_fn, method, data_len, frame_shift=1,
                   window_size=25, pre_emphasis=0.96, lpc_order=12,
                   tcl_shell_cmd=None):
    """Return Snack formant/bandwidth estimates NaN-padded to data_len.

    Runs Snack formant tracking on *wav_fn* (calling Snack via 'exe',
    'python' or 'tcl' per *method*) and pads each of the eight vectors
    (sF1-sF4 formants, sB1-sB4 bandwidths) so they are exactly *data_len*
    samples long.  Snack's estimates only start half a window into the
    audio, so the first half-frame is NaN; the tail is NaN-filled.

    frame_shift/window_size are in milliseconds, pre_emphasis is the
    pre-emphasis factor applied before windowing and lpc_order the LPC
    analysis order.  The window type (Hamming), lpctype (0,
    autocorrelation) and ds_freq (10000 Hz) are fixed.  tcl_shell_cmd is
    only used when method == 'tcl'.  Defaults match VoiceSauce.

    Returns a dict keyed by sformant_names, each entry a NumPy vector of
    length data_len.

    See http://www.speech.kth.se/snack/man/snack2.2/tcl-man.html#sformant
    """
    raw = snack_raw_formants(wav_fn, method, frame_shift, window_size,
                             pre_emphasis, lpc_order, tcl_shell_cmd)
    # Snack's first estimate corresponds to the middle of the first window.
    head_len = np.int_(np.floor(window_size / frame_shift / 2))
    padded = {}
    for key in sformant_names:
        vec = raw[key]
        head = np.full(head_len, np.nan)
        tail = np.full(data_len - (len(vec) + head_len), np.nan)
        padded[key] = np.hstack((head, vec, tail))
    return padded
def snack_raw_formants(wav_fn, method, frame_shift=1, window_size=25,
                       pre_emphasis=0.96, lpc_order=12, tcl_shell_cmd=None):
    """Return raw (unpadded) formant/bandwidth estimates from Snack.

    Dispatches to the implementation selected by *method* ('exe', 'python'
    or 'tcl'); see snack_formants() for parameter semantics.  Returns a
    dict of NumPy vectors keyed by sformant_names, without NaN padding.

    Raises ValueError for an unknown method.
    """
    if method not in valid_snack_methods:
        raise ValueError('Invalid Snack calling method. Choices are {}'.format(valid_snack_methods))
    if method == 'exe':  # pragma: no cover
        return snack_raw_formants_exe(wav_fn, frame_shift, window_size,
                                      pre_emphasis, lpc_order)
    if method == 'python':
        return snack_raw_formants_python(wav_fn, frame_shift, window_size,
                                         pre_emphasis, lpc_order)
    # Only 'tcl' remains.
    return snack_raw_formants_tcl(wav_fn, frame_shift, window_size,
                                  pre_emphasis, lpc_order, tcl_shell_cmd)
def snack_raw_formants_exe(wav_fn, frame_shift, window_size, pre_emphasis, lpc_order): # pragma: no cover
    """Implement snack_raw_formants() by calling Snack through a Windows
    standalone binary executable.

    Note this method can only be used on Windows; raises ValueError on
    other platforms and OSError if snack.exe fails or leaves no .frm file.
    Returns the raw (unpadded) formant/bandwidth dict keyed by
    sformant_names; see snack_raw_formants() for parameter semantics.
    """
    # Check that we are on a Windows machine
    if platform != 'win32' and platform != 'cygwin':
        raise ValueError("Cannot use 'exe' as Snack calling method on non-Windows machine")
    # Call Snack using system command to run the bundled standalone executable
    exe_path = os.path.join(os.path.dirname(__file__), 'Windows', 'snack.exe')
    snack_cmd = [exe_path, 'formant', wav_fn]
    # Snack takes times in seconds; our arguments are in milliseconds.
    snack_cmd.extend(['-windowlength', str(window_size / 1000)])
    snack_cmd.extend(['-framelength', str(frame_shift / 1000)])
    snack_cmd.extend(['-windowtype', 'Hamming'])
    snack_cmd.extend(['-lpctype', '0'])
    snack_cmd.extend(['-preemphasisfactor', str(pre_emphasis)])
    snack_cmd.extend(['-ds_freq', '10000'])
    snack_cmd.extend(['-lpcorder', str(lpc_order)])
    return_code = call(snack_cmd)
    if return_code != 0:
        raise OSError('snack.exe error')
    # snack.exe writes its results next to the input file with a .frm
    # extension.  Use splitext (not split('.')) so paths containing extra
    # dots are handled correctly; this also matches snack_raw_formants_tcl.
    frm_fn = os.path.splitext(wav_fn)[0] + '.frm'
    # Load data from frm file
    if os.path.isfile(frm_fn):
        frm_results = np.loadtxt(frm_fn, dtype=float)
        # Cleanup and remove frm file
        os.remove(frm_fn)
    else:
        raise OSError('snack.exe error -- unable to locate .frm file')
    # Save data into dictionary, one column per formant/bandwidth name
    num_cols = frm_results.shape[1]
    estimates_raw = {}
    for i in range(num_cols):
        estimates_raw[sformant_names[i]] = frm_results[:, i]
    return estimates_raw
def snack_raw_formants_python(wav_fn, frame_shift, window_size, pre_emphasis, lpc_order):
    """Implement snack_raw_formants() by calling Snack through Python's
    tkinter library.

    Note this method can only be used if the user's machine is setup,
    so that Tcl/Tk can be accessed through Python's tkinter library.
    Returns the raw (unpadded) formant/bandwidth dict keyed by
    sformant_names; see snack_raw_formants() for parameter semantics.

    Raises ImportError when no tkinter module is available, and
    tkinter.TclError when the snack Tcl package cannot be loaded.
    """
    try:
        import tkinter
    except ImportError:
        try:
            import Tkinter as tkinter
        except ImportError: # pragma: no cover
            print("Need Python library tkinter. Is it installed?")
            # Re-raise instead of falling through: the original code only
            # printed, which produced a confusing NameError when tkinter
            # was used below.
            raise
    # HACK: Need to replace single backslash with two backslashes,
    # so that the Tcl shell reads the file path correctly on Windows
    if sys.platform == 'win32' or sys.platform == 'cygwin': # pragma: no cover
        wav_fn = wav_fn.replace('\\', '\\\\')
    tcl = tkinter.Tcl()
    try:
        # XXX This will trigger a message 'cannot open /dev/mixer' on the
        # console if you don't have a /dev/mixer.  You don't *need* a mixer to
        # snack the way we are using it, but there's no practical way to
        # suppress the message without modifying the snack source.  Fortunately
        # most people running opensauce will in fact have a /dev/mixer.
        tcl.eval('package require snack')
    except tkinter.TclError as err: # pragma: no cover
        log.critical('Cannot load snack (is it installed?): %s', err)
        # Propagate the failure: returning None here would make callers
        # fail later with an opaque error when indexing the result.
        raise
    tcl.eval('snack::sound s')
    tcl.eval('s read {}'.format(wav_fn))
    cmd = ['s formant']
    # Snack takes times in seconds; our arguments are in milliseconds.
    cmd.extend(['-windowlength {}'.format(window_size / 1000)])
    cmd.extend(['-framelength {}'.format(frame_shift / 1000)])
    cmd.extend(['-windowtype Hamming'])
    cmd.extend(['-lpctype 0'])
    cmd.extend(['-preemphasisfactor {}'.format(pre_emphasis)])
    cmd.extend(['-ds_freq 10000'])
    cmd.extend(['-lpcorder {}'.format(lpc_order)])
    # Run Snack formant command
    tcl.eval('set data [{}]'.format(' '.join(cmd)))
    # XXX check for errors here and log and abort if there is one.  Result
    # string will start with ERROR:.
    # Collect results in dictionary: one value per formant/bandwidth name
    # per frame.
    num_frames = int(tcl.eval('llength $data'))
    num_cols = len(sformant_names)
    estimates_raw = {}
    for n in sformant_names:
        estimates_raw[n] = np.empty(num_frames)
    for i in range(num_frames):
        values = tcl.eval('lindex $data ' + str(i)).split()
        for j in range(num_cols):
            estimates_raw[sformant_names[j]][i] = np.float_(values[j])
    return estimates_raw
def snack_raw_formants_tcl(wav_fn, frame_shift, window_size, pre_emphasis, lpc_order, tcl_shell_cmd):
    """Implement snack_raw_formants() by calling Snack through a Tcl shell.

    tcl_shell_cmd is the name of the command to invoke the Tcl shell.
    Note this method can only be used if Tcl is installed.

    Writes a temporary Tcl script next to wav_fn, runs it with
    tcl_shell_cmd, reads the resulting .frm file, then removes both
    temporary files.  The vectors returned are the raw Snack output,
    without padding.  For more info, see documentation for
    snack_raw_formants().
    """
    # File path for wav file provided to Tcl script
    in_file = wav_fn
    # ERROR: wind_dur parameter must be between [0.0001, 0.1].
    # ERROR: frame_step parameter must be between [1/sampling rate, 0.1].
    # invalid/inconsistent parameters -- exiting.
    # HACK: Tcl shell expects double backslashes in Windows path
    if sys.platform == 'win32' or sys.platform == 'cygwin': # pragma: no cover
        in_file = in_file.replace('\\', '\\\\')
    tcl_file = os.path.join(os.path.dirname(wav_fn), 'tclforsnackformant.tcl')
    # Write Tcl script to compute Snack formants; the exec trampoline line
    # lets the same file restart itself under tclsh.
    f = open(tcl_file, 'w')
    script = "#!/usr/bin/env bash\n"
    script += '# the next line restarts with tclsh \\\n'
    script += 'exec {} "$0" "$@"\n\n'.format(tcl_shell_cmd)
    # HACK: The variable user_snack_lib_path is a hack we use in continuous
    # integration testing.  The reason is that we may not have the
    # permissions to copy the Snack library to the standard Tcl library
    # location.  This is a workaround to load the Snack library from a
    # different location, where the location is given by
    # user_snack_lib_path.
    if user_snack_lib_path is not None:
        script += 'pkg_mkIndex {} snack.tcl libsnack.dylib libsound.dylib\n'.format(user_snack_lib_path)
        script += 'lappend auto_path {}\n\n'.format(user_snack_lib_path)
    script += 'package require snack\n\n'
    script += 'snack::sound s\n\n'
    script += 's read {}\n\n'.format(in_file)
    # Snack writes results to <wav basename>.frm; times are converted from
    # our milliseconds to Snack's seconds below.
    script += 'set fd [open [file rootname {}].frm w]\n'.format(in_file)
    script += 'puts $fd [join [s formant -windowlength {} -framelength {} -windowtype Hamming -lpctype 0 -preemphasisfactor {} -ds_freq 10000 -lpcorder {}]\n\n]\n'.format(window_size / 1000, frame_shift / 1000, pre_emphasis, lpc_order)
    script += 'close $fd\n\n'
    script += 'exit'
    f.write(script)
    f.close()
    # Run Tcl script
    try:
        return_code = call([tcl_shell_cmd, tcl_file])
    except OSError: # pragma: no cover
        os.remove(tcl_file)
        raise OSError('Error while attempting to call Snack via Tcl shell. Is Tcl shell command {} correct?'.format(tcl_shell_cmd))
    else:
        if return_code != 0: # pragma: no cover
            os.remove(tcl_file)
            raise OSError('Error when trying to call Snack via Tcl shell script.')
    # Load results from frm file and save into return variables, one
    # column per formant/bandwidth name
    frm_file = os.path.splitext(wav_fn)[0] + '.frm'
    num_cols = len(sformant_names)
    if os.path.isfile(frm_file):
        frm_results = np.loadtxt(frm_file, dtype=float).reshape((-1, num_cols))
        estimates_raw = {}
        for i in range(num_cols):
            estimates_raw[sformant_names[i]] = frm_results[:, i]
        # Cleanup and remove frm file
        os.remove(frm_file)
    else: # pragma: no cover
        raise OSError('Snack Tcl shell error -- unable to locate .frm file')
    # Cleanup and remove Tcl script file
    os.remove(tcl_file)
    return estimates_raw
| StarcoderdataPython |
3213691 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>, Cisco Systems, Inc.
from django.core.urlresolvers import reverse # noqa
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used and when not.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
    class Nexus1000vTest(test.BaseAdminViewTests):
        # Admin-view test for the Cisco Nexus1000v panel; mox expectations
        # below are recorded in the exact order the view is expected to
        # call them.
        @test.create_stubs({api.neutron: ('profile_list',
                                          'profile_bindings_list'),
                            api.keystone: ('tenant_list',)})
        def test_index(self):
            """Index view queries profiles/bindings and renders the template."""
            tenants = self.tenants.list()
            net_profiles = self.net_profiles.list()
            policy_profiles = self.policy_profiles.list()
            net_profile_binding = self.network_profile_binding.list()
            policy_profile_binding = self.policy_profile_binding.list()
            # Expected Neutron calls: network then policy profiles, then
            # their bindings, in this order.
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'network').AndReturn(net_profiles)
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
            api.neutron.profile_bindings_list(
                IsA(http.HttpRequest),
                'network').AndReturn(net_profile_binding)
            api.neutron.profile_bindings_list(
                IsA(http.HttpRequest),
                'policy').AndReturn(policy_profile_binding)
            # Tenant list is fetched twice (once per binding table).
            api.keystone.tenant_list(
                IsA(http.HttpRequest)).AndReturn([tenants, False])
            api.keystone.tenant_list(
                IsA(http.HttpRequest)).AndReturn([tenants, False])
            self.mox.ReplayAll()
            res = self.client.get(reverse('horizon:router:nexus1000v:index'))
            self.assertTemplateUsed(res, 'router/nexus1000v/index.html')
| StarcoderdataPython |
3321734 | from app.account.views import account # noqa
| StarcoderdataPython |
4948198 | <reponame>justindavies/Fluid
#!/usr/bin/python
from utils import *
import pandas as pd
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.recurrent import LSTM, GRU
from keras.layers import Convolution1D, MaxPooling1D, AtrousConvolution1D, RepeatVector
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger, EarlyStopping
from keras.layers.wrappers import Bidirectional
from keras import regularizers
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import *
from keras.optimizers import RMSprop, Adam, SGD, Nadam
from keras.initializers import *
from pymongo import MongoClient
from keras.utils.np_utils import to_categorical
import sys
from collections import Counter
import math
import os
import pickle
import datetime
import keras
class LossHistory(keras.callbacks.Callback):
    """Keras callback that mirrors per-epoch training metrics into MongoDB.

    After every epoch the accumulated metric history is written into the
    run document identified by mongo_id in the mongo_res collection.
    """

    def __init__(self, mongo_id, mongo_res):
        # _id of the run document to update, and the collection holding it.
        self.mongo_id = mongo_id
        self.mongo_res = mongo_res

    def on_train_begin(self, logs=None):
        # Fresh history per training run.  (The original signature used a
        # mutable default logs=[], which is shared across calls; Keras
        # passes a dict anyway, so the default is only a safety net.)
        self.losses = []

    def on_epoch_end(self, batch, logs=None):
        logs = {} if logs is None else logs
        # Drop the 'lr' entry if present (the original del raised KeyError
        # when it was absent).  Presumably excluded to keep the stored
        # history BSON-serializable — TODO confirm.
        logs.pop('lr', None)
        self.losses.append(logs)
        self.mongo_res.update_one(
            {"_id": self.mongo_id},
            {"$set": {"history": self.losses}})
# Run parameters come from argv (instrument, mode, window, forecast) or,
# when absent, from environment variables.
if (len(sys.argv) > 1):
    instrument = sys.argv[1]
    mode = sys.argv[2]
    window = int(sys.argv[3])
    forecast = int(sys.argv[4])
else:
    instrument = os.environ['INSTRUMENT']
    mode = os.environ['MODE']
    window = int(os.environ['WINDOW'])
    forecast = int(os.environ['FORECAST'])
# "dev" talks to a local MongoDB; anything else uses the "mongo" host
# (e.g. a docker-compose service name).
if mode == "dev":
    client = MongoClient('mongodb://localhost:27017')
else:
    client = MongoClient('mongodb://mongo:27017')
#db.ticks.createIndex({ time: 1})
print("Instrument is " + instrument)
db = client.fluid
run_collection = db.run_statistics
ticksdb = db.candles
fundamentalsdb = db.fundamentals
# Daily candles and fundamentals for the instrument, oldest first.
candles = ticksdb.find({"ticker": instrument}).sort('date', 1)
fundamentals = fundamentalsdb.find({"ticker": instrument}).sort('date', 1)
# Per-candle price/volume series, filled in the loop below.
openp = []
highp = []
lowp = []
closep = []
volumep = []
#ticks = ticksdb.find({"instrument": instrument, "time":{"$gte": time_ago }}).sort('time', 1)
# For each candle, pick the most recent fundamentals report dated at or
# before the candle; only keep candles once at least one report exists.
fundies = []
for candle in candles:
    fundamental = fundamentalsdb.find({"ticker": instrument, "date":{"$lte": candle['date'] }}).sort('date', -1)
    for f in fundamental:
        fundies.append(f)
        break
    if len(fundies) > 0:
        openp.append(float(candle['adj_open']))
        highp.append(float(candle['adj_high']))
        lowp.append(float(candle['adj_low']))
        closep.append(float(candle['adj_close']))
        volumep.append(float(candle['adj_volume']))
print len(fundies)
print(len(openp))
# Transpose the fundamentals documents into per-field numeric series,
# skipping metadata keys; "None" strings become 0.
fundamentals = []
for key in fundies[0]:
    tmp_arr = []
    if key != "_id" and key != "date" and key != "Quarter end" and key != "ticker":
        for f in fundies:
            if f[key] == "None":
                f[key] = 0
            tmp_arr.append(float(f[key]))
    if len(tmp_arr) > 0:
        fundamentals.append(tmp_arr)
# Debug dump of the fundamentals matrix.
np.savetxt('test.csv', fundamentals, delimiter=',')
# data_chng = data_original.ix[:, 'Adj Close'].pct_change().dropna().tolist()
# NOTE(review): these constants shadow the CLI/env `window` and `forecast`
# values read above, which are only recorded in Mongo — confirm intended.
WINDOW = 30
EMB_SIZE = 5
STEP = 1
FORECAST = 1
# Build sliding windows X and up/down labels Y ([1,0] = close rises after
# FORECAST days, [0,1] = it falls); the IndexError at the series end is
# caught by the except and terminates the loop.
X, Y = [], []
for i in range(0, len(closep), STEP):
    tmp_arr = []
    try:
        o = openp[i:i+WINDOW]
        h = highp[i:i+WINDOW]
        l = lowp[i:i+WINDOW]
        c = closep[i:i+WINDOW]
        v = volumep[i:i+WINDOW]
        for f in fundamentals:
            f = f[i:i+WINDOW]
            #print f
            #f = (np.array(f) - np.mean(f)) / np.std(f)
            tmp_arr.append(f)
        # Z-score each window independently.
        o = (np.array(o) - np.mean(o)) / np.std(o)
        h = (np.array(h) - np.mean(h)) / np.std(h)
        l = (np.array(l) - np.mean(l)) / np.std(l)
        c = (np.array(c) - np.mean(c)) / np.std(c)
        v = (np.array(v) - np.mean(v)) / np.std(v)
        x_i = closep[i:i+WINDOW]
        y_i = closep[i+WINDOW+FORECAST]
        last_close = x_i[-1]
        next_close = y_i
        if last_close < next_close:
            y_i = [1, 0]
        else:
            y_i = [0, 1]
        tmp_arr.append(o)
        tmp_arr.append(h)
        tmp_arr.append(l)
        tmp_arr.append(c)
        tmp_arr.append(v)
        # NOTE(review): the first column_stack (fundamentals + OHLCV) is
        # immediately overwritten, so fundamentals never reach the model —
        # likely unintended dead code; confirm which stack was meant.
        x_i = np.column_stack(tmp_arr)
        x_i = np.column_stack((o, h, l ,v, c))
    except Exception as e:
        print e
        break
    X.append(x_i)
    Y.append(y_i)
X, Y = np.array(X), np.array(Y)
X_train, X_test, Y_train, Y_test = create_Xt_Yt(X, Y)
#EMB_SIZE = len(X_train[0])
print X_train.shape
#exit()
# Shape the data as (samples, WINDOW, EMB_SIZE) for the Conv1D input.
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], EMB_SIZE))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], EMB_SIZE))
# Record this run in Mongo; the returned _id is updated with the epoch
# history (via LossHistory) and the end timestamp.
# NOTE(review): `id` shadows the Python builtin.
run_creation = {"instrument": instrument, "window": window, "forecast": forecast, "start": datetime.datetime.utcnow(), "examples": len(openp), "history":[]}
id = run_collection.insert_one( run_creation )
history = LossHistory(id.inserted_id, run_collection)
model = Sequential()
model.add(Convolution1D(input_shape = (WINDOW, EMB_SIZE),
nb_filter=32,
filter_length=8,
border_mode='same'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dropout(0.25))
model.add(Convolution1D(nb_filter=16,
filter_length=4,
border_mode='same'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dropout(0.25))
model.add(Convolution1D(nb_filter=8,
filter_length=4,
border_mode='same'))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dense(2))
model.add(Activation('softmax'))
opt = Nadam(lr=0.002)
reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.9, patience=30, min_lr=0.000001, verbose=0)
checkpointer = ModelCheckpoint(filepath="lolkek.hdf5", verbose=0, save_best_only=True)
model.compile(optimizer=opt,
loss='categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(X_train, Y_train,
nb_epoch = 100,
batch_size = 128,
verbose=2,
validation_data=(X_test, Y_test),
callbacks=[reduce_lr, checkpointer, history],
shuffle=True)
model.load_weights("lolkek.hdf5")
pred = model.predict(np.array(X_test))
run_collection.update_one({"_id": id.inserted_id}, {"$set": {"end": datetime.datetime.utcnow()}})
| StarcoderdataPython |
3513895 | #! /usr/bin/env python3.6
import tkinter as tk
from ColorList import color_list
# Root window and the gray frame that holds the color grid.
root = tk.Tk()
root.title("Ubuntu Color Display for Tkinter")
frame = tk.Frame(root)
frame.configure(background = 'slate gray')
frame.pack(fill = 'both', expand = True, side = 'top')
'''
These two for loops configure the columns and rows to expand to
fill the space when the window is resized
'''
frame_row = 0
frame_column = 0
# 30 columns of color buttons.
for i in range(30):
    frame.columnconfigure(frame_column, weight = 1)
    frame_column += 1
# 19 rows of color buttons.
for i in range(19):
    frame.rowconfigure(frame_row, weight = 1)
    frame_row += 1
class ToolTip():
    """Tooltip that shows a piece of text in a borderless Toplevel next to a widget."""

    def __init__(self, widget):
        self.widget = widget
        self.tip_window = None  # Toplevel while the tip is visible, else None

    def show_tip(self, tip_text):
        """Display *tip_text* in a tooltip window near the widget; no-op if already shown or text is empty."""
        if self.tip_window or not tip_text:
            return
        x, y, _cx, cy = self.widget.bbox("insert") # get size of widget
        # Bug fix: this line originally read `x = x = self.widget.winfo_rootx() + 25`,
        # a double assignment that discarded the bbox x offset. Mirror the y
        # calculation below and add the offsets instead.
        x = x + self.widget.winfo_rootx() + 25 # calculate to display tooltip
        y = y + cy + self.widget.winfo_rooty() + 50 # below and to the right
        self.tip_window = tw = tk.Toplevel(self.widget) # create new tip window
        tw.wm_overrideredirect(True) # remove window manager decorations
        tw.wm_geometry("+%d+%d" % (x, y)) # position the tip window
        label = tk.Label(tw, text=tip_text, justify=tk.LEFT,
                         background="#ffffe0", relief=tk.SOLID, borderwidth=1,
                         font=("tahome", "8", "normal"))
        label.configure(foreground = 'black')
        label.pack(ipadx=1)

    def hide_tip(self):
        """Destroy the tooltip window, if any, and forget it."""
        tw = self.tip_window
        self.tip_window = None
        if tw:
            tw.destroy()
def create_ToolTip(widget, text):
    """Attach a ToolTip displaying *text* to *widget* via <Enter>/<Leave> bindings."""
    tip = ToolTip(widget)  # Create instance of class

    def _on_enter(event):
        tip.show_tip(text)

    def _on_leave(event):
        tip.hide_tip()

    # Bind mouse events so hovering shows the tip and leaving hides it.
    widget.bind('<Enter>', _on_enter)
    widget.bind('<Leave>', _on_leave)
# End tool tip code block
'''
Create the 558 buttons colored with each color in the list
variable color_list using a for loop
'''
row = 0
column = 0
num = 1
for color in color_list:
    # NOTE(review): this string assignment is immediately overwritten by the
    # Button below and has no effect.
    button = 'button' + str(num) # the incrementing button variable
    button = tk.Button(frame) # create the button using the incremented name
    button.configure(background = color, activebackground = color)
    button.grid(row = row, column = column, sticky = 'nsew')
    create_ToolTip(button, color)
    num += 1
    if column < 29: # continue on a row until 30 columns are reached
        row = row
        column += 1
    else: # once 30 columns have displayed add a row and return to column 0
        row += 1
        column = 0
# The exit button located at the bottom of the window
button = tk.Button(frame, text = 'Exit', command = root.destroy,
                   background = '#48483E', foreground = '#CFD0C2')
button.grid(row = 20, column = 0, columnspan = 29, pady = (10))
root.mainloop()
12838463 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MCMC-estimation of status transition rates from IUCN record
Created on Mon Oct 28 14:43:44 2019
@author: <NAME> (<EMAIL>)
"""
import numpy as np
np.set_printoptions(suppress=True)
import pandas as pd
import os,sys
import datetime
from scipy.optimize import curve_fit
import warnings
import iucn_sim.functions as cust_func
# get extinction probs_________________________________________________________
def p_e_year(years, p_e):
    """Convert an extinction probability over a span of *years* into the equivalent per-year probability."""
    survival_per_year = (1 - float(p_e)) ** (1 / years)
    return 1 - survival_per_year
def update_multiplier(q, d=1.1):
    """Propose a new rate by scaling *q* with a random multiplier; also return the log Hastings ratio."""
    draw = np.random.uniform(0, 1)
    log_window = 2 * np.log(d)
    factor = np.exp(log_window * (draw - .5))
    return q * factor, np.log(factor)

def sample_rate_mcmc(count, tot_time, n_samples = 1, n_gen = 100000,burnin = 1000):
    """Sample *n_samples* posterior rates for *count* events over *tot_time* via a multiplier-proposal MCMC."""
    def log_likelihood(k, dT, rate):
        # Poisson log-likelihood up to a rate-independent constant.
        return np.log(rate) * k - dT * rate

    accepted = []
    rate = 0.01
    current_lik = log_likelihood(count, tot_time, rate)
    for step in range(n_gen):
        proposal, hastings = update_multiplier(rate)
        proposal_lik = log_likelihood(count, tot_time, proposal)
        # Metropolis-Hastings acceptance on the log scale.
        if proposal_lik - current_lik + hastings >= np.log(np.random.random()):
            rate = proposal
            current_lik = proposal_lik
        # Thin by 10 after the burn-in period.
        if step > burnin and step % 10 == 0:
            accepted.append(rate)
    return np.random.choice(accepted, n_samples, replace=False)
def power_function(x, a, b):
    """Evaluate the power law y = a * x**b (a and b are coerced to float)."""
    return float(a) * x ** float(b)
def make_empty_rate_df(species_list, rate_columns, status_label):
    """Return a zero-filled DataFrame with one row per species and *rate_columns* '<label>_p_ext_<i>' columns."""
    header = ['species'] + ['%s_p_ext_%i' % (status_label, idx) for idx in range(rate_columns)]
    rate_df = pd.DataFrame(np.zeros((len(species_list), rate_columns + 1)), columns=header)
    rate_df.species = species_list
    return rate_df
def add_arguments(parser):
    """Register the estimate_rates command-line options on *parser* (an argparse parser)."""
    parser.add_argument(
        '--species_data',
        required=True,
        metavar='<path>',
        help="File containing species list and current IUCN status of species, as well as generation length (GL) data estimates if available. GL data is only used for '--extinction_probs_mode 0' ('species_data.txt' output from get_iucn_data function).",
    )
    parser.add_argument(
        '--iucn_history',
        required=True,
        metavar='<path>',
        help="File containing IUCN history of the reference group for transition rate estimation ('*_iucn_history.txt' output of get_iucn_data function)."
    )
    parser.add_argument(
        '--outdir',
        required=True,
        metavar='<path>',
        help="Provide path to outdir where results will be saved."
    )
    parser.add_argument(
        '--extinction_probs_mode',
        default=0,
        metavar='N',
        help="Set to '0' to use the critE EX mode to determine extinction probabilities for each status (e.g. Mooers et al, 2008 approach). Set to '1' to use empirical EX mode, based on the recorded extinction in the IUCN history of the reference group (e.g. Monroe et al, 2019 approach). GL data can only be used in the critE EX mode ('0')."
    )
    parser.add_argument(
        '--possibly_extinct_list',
        default=0,
        metavar='<path>',
        help="File containing list of taxa that are likely extinct, but that are listed as extant in IUCN, including the year of their assessment as possibly extinct ('possibly_extinct_reference_taxa.txt' output from get_iucn_data function). These species will then be modeled as extinct by the esimate_rates function, which will effect the estimated extinction probabilities when chosing `--extinction_probs_mode 1`",
    )
    parser.add_argument(
        '--species_specific_regression',
        action='store_true',
        help='Enables species-specific regression fitting to model LC, NT, and VU extinction probabilities. Only applicable with --extinction_probs_mode 0 (critE mode) and if GL is provided.',
        default=False
    )
    parser.add_argument(
        '--rate_samples',
        default=100,
        metavar='N',
        help="How many rates to sample from the posterior transition rate estimates. These rates will be used to populate transition rate q-matrices for downstream simulations. Later on you can still chose to run more simulation replicates than the here specified number of produced transition rate q-matrices, in which case the `run_sim` function will randomely resample from the available q-matrices (default=100, this is ususally sufficient, larger numbers can lead to very high output file size volumes)."
    )
    parser.add_argument(
        '--n_gen',
        default=100000,
        metavar='N',
        help="Number of generations for MCMC for transition rate estimation (default=100000)."
    )
    parser.add_argument(
        '--burnin',
        default=1000,
        metavar='N',
        help="Burn-in for MCMC for transition rate estimation (default=1000)."
    )
    parser.add_argument(
        '--seed',
        default=None,
        help="Set random seed for the MCMC."
    )
def main(args):
    """Estimate IUCN status transition rates and extinction probabilities.

    Reads the species data and IUCN history files, samples transition rates
    via MCMC, computes status-specific extinction probabilities (critE mode 0
    or empirical mode 1), and pickles the per-species q-matrix input data for
    the downstream run_sim step.
    """
    # get user input___________________________________________________________
    input_data = args.species_data
    iucn_history = args.iucn_history
    outdir = args.outdir
    try:
        extinction_probs_mode = int(args.extinction_probs_mode)
    except:
        print('\nInvalid extinction_probs_mode provided. Please choose between the currenlty available options 0 or 1')
        quit()
    possibly_extinct_list = args.possibly_extinct_list
    n_rep = int(args.rate_samples)
    n_gen = int(args.n_gen)
    burnin = int(args.burnin)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    seed = args.seed
    try:
        random_seed = False
        seed = int(seed)
    except:
        # No (or non-integer) user seed: draw one and remember that we did.
        seed = np.random.randint(999999999)
        random_seed = True
    np.random.seed(seed)
    np.savetxt(os.path.join(outdir,'starting_seed.txt'),np.array([seed]),fmt='%i')
    # get input data
    species_data_input = pd.read_csv(input_data,sep='\t',header=None).dropna()
    invalid_status_taxa = species_data_input[~species_data_input.iloc[:,1].isin(['LC','NT','VU','EN','CR','DD','NE'])]
    if len(invalid_status_taxa)>0:
        print('\nFound invalid IUCN statuses:',list(invalid_status_taxa[1].values),'\n\nMake sure that the second column of your --species_data input contains the current IUCN status of your target species, which must be one of the following valid extant statuses: LC, NT, VU, EN, CR, DD, NE')
        # if this effects only a minority of taxa, continue after removing these
        if len(invalid_status_taxa)/len(species_data_input) < 0.5:
            print('\nAutomatically dropping the following taxa because of invalid IUCN status information:', list(invalid_status_taxa[0].values))
            species_data_input = species_data_input[species_data_input.iloc[:,1].isin(['LC','NT','VU','EN','CR','DD','NE'])]
        else:
            quit('\nPlease fix your species_data input file. Check presence of current IUCN status information and column order.')
    # get the list of species
    species_list = species_data_input.iloc[:,0].values.astype(str)
    # replace underscores in species name in case they are present
    species_list = np.array([i.replace('_',' ') for i in species_list])
    # Check if all species names are binomial
    for species in species_list:
        if len(species.split(' ')) != 2:
            print('ERROR','*'*50,'\nABORTED: All provided species names provided under --species_data flag must be binomial! Found non binomial name:\n%s\n'%species,'*'*50)
            quit()
    # get the current IUCN status of all species
    current_status = species_data_input.iloc[:,1].values.astype(str)
    # get GL data if provided
    gl_data_available = False
    if species_data_input.shape[1] > 2:
        gl_matrix = species_data_input.iloc[:,2:].values
        gl_data_available = True
    #__________________________________________________________________________
    # process the IUCN history data____________________________________________
    iucn_start_year = 2001 #start-year of the IUCN3.1 standard for categories
    current_year = datetime.datetime.now().year
    master_stat_time_df = pd.DataFrame(columns=['species']+list(np.arange(iucn_start_year,current_year+1).astype(str)))
    statuses_through_time = pd.read_csv(iucn_history, delimiter = '\t')
    target_columns = [column for column in master_stat_time_df.columns if column in statuses_through_time.columns]
    master_stat_time_df[target_columns] = statuses_through_time[target_columns]
    # treat EW as EX
    master_stat_time_df.replace('EW', 'EX',inplace=True)
    # replace occurrences of NR (not recognized) with nan
    master_stat_time_df.replace('NR', np.nan,inplace=True)
    # clean and sort df
    master_stat_time_df = master_stat_time_df.sort_values(by='species')
    master_stat_time_df = master_stat_time_df.drop_duplicates()
    master_stat_time_df.index = np.arange(len(master_stat_time_df))
    # set the assessment at the current year to NE for species without any assessments
    na_row_indeces = np.where(master_stat_time_df.iloc[:,1:].T.isnull().all().values)
    for index in na_row_indeces:
        # np.where returns a 1-tuple, so `index` is the whole index array here.
        master_stat_time_df.iloc[index,-1] = 'NE'
    # if possibly_extinct_list provided, read that list and set the status for those taxa to extinct, starting at provided year
    if possibly_extinct_list:
        pex_data = pd.read_csv(possibly_extinct_list,sep='\t')
        pex_species_list = pex_data.iloc[:,0].values.astype(str)
        pex_year = pex_data.iloc[:,1].values.astype(int)
        column_names = master_stat_time_df.columns.values
        row_names = master_stat_time_df.species.values
        #df_selection = master_stat_time_df[master_stat_time_df.species.isin(pex_species_list)]
        for i,species in enumerate(pex_species_list):
            row_index = np.where(row_names==species)[0][0]
            assessment_year = pex_year[i]
            column_index = np.where(column_names==str(assessment_year))[0][0]
            # Mark the taxon EX from its possibly-extinct year onwards.
            master_stat_time_df.iloc[row_index,column_index:] = 'EX'
    # extract most recent valid status for each taxon
    valid_status_dict,most_recent_status_dict,status_series,taxon_series = cust_func.extract_valid_statuses(master_stat_time_df)
    # extinciton prob mode 0: remove all currently extinct taxa
    if extinction_probs_mode == 0:
        ext_indices = np.array([num for num,i in enumerate(most_recent_status_dict.keys()) if most_recent_status_dict[i] == 'EX'])
        master_stat_time_df = master_stat_time_df.drop(ext_indices)
        master_stat_time_df.index = np.arange(len(master_stat_time_df))
        # replace any occurrence of 'EX' as a past status with NaN to avoid problems with counting types of transitions (treating these assessments as invalid)
        master_stat_time_df.replace('EX', np.nan,inplace=True)
    # extinciton prob mode 1: remove only taxa that have been extinct all along, keeping those that have recorded transition to extinct within time frame
    elif extinction_probs_mode == 1:
        ext_indices = np.array([num for num,i in enumerate(master_stat_time_df.iloc[:,1:].values.astype(str)) if 'EX' in np.unique(i) and len(np.unique(i))==2])
        master_stat_time_df = master_stat_time_df.drop(ext_indices)
        master_stat_time_df.index = np.arange(len(master_stat_time_df))
    # write IUCN history df to file
    master_stat_time_df.to_csv(os.path.join(outdir,'formatted_iucn_history_reference_taxa.txt'),sep='\t')
    # extract most recent valid status for each taxon
    valid_status_dict,most_recent_status_dict,status_series,taxon_series = cust_func.extract_valid_statuses(master_stat_time_df)
    # count current status distribution
    unique, counts = np.unique(status_series, return_counts=True)
    print('\nCurrent IUCN status distribution in reference group:',dict(zip(unique, counts)))
    # count how often each status change occurs
    change_type_dict = cust_func.count_status_changes(master_stat_time_df,valid_status_dict)
    print('Summing up years spend in each category ...')
    years_in_each_category = cust_func.get_years_spent_in_each_category(master_stat_time_df,valid_status_dict)
    # write the status change data to file
    final_years_count_array = np.array([list(years_in_each_category.keys()),list(years_in_each_category.values())]).T
    np.savetxt(os.path.join(outdir,'years_spent_in_each_category.txt'),final_years_count_array,fmt='%s\t%s')
    change_type_dict_array = np.array([list(change_type_dict.keys()),list(change_type_dict.values())]).T
    np.savetxt(os.path.join(outdir,'change_type_dict.txt'),change_type_dict_array,fmt='%s\t%s')
    #__________________________________________________________________________
    # sample transition rates for all types of changes_________________________
    if extinction_probs_mode == 0:
        status_change_coutn_df = pd.DataFrame(data=np.zeros([6,6]).astype(int),index = ['LC','NT','VU','EN','CR','DD'],columns=['LC','NT','VU','EN','CR','DD'])
    elif extinction_probs_mode == 1:
        status_change_coutn_df = pd.DataFrame(data=np.zeros([7,7]).astype(int),index = ['LC','NT','VU','EN','CR','DD','EX'],columns=['LC','NT','VU','EN','CR','DD','EX'])
    for status_change in change_type_dict.keys():
        states = status_change.split('->')
        original_state = states[0]
        new_state = states[1]
        count = change_type_dict[status_change]
        status_change_coutn_df.loc[original_state,new_state] = count
    status_change_coutn_df.to_csv(os.path.join(outdir,'status_change_counts.txt'),sep='\t',index=True)
    print('Counted the following transition occurrences in IUCN history of reference group:')
    print(status_change_coutn_df)
    if not random_seed:
        print('Running MCMC with user-set starting seed %i ...'%seed)
    else:
        print('Running MCMC with randomely generated starting seed %i ...'%seed)
    # One MCMC run per ordered pair of distinct statuses.
    sampled_rates_df = pd.DataFrame(columns = ['status_change']+ ['rate_%i'%i for i in np.arange(0,n_rep)])
    for status_a in status_change_coutn_df.columns:
        row = status_change_coutn_df.loc[status_a]
        for status_b in row.index.values:
            if not status_a == status_b:
                count = row[status_b]
                total_time = years_in_each_category[status_a]
                rates = sample_rate_mcmc(count, total_time, n_samples = n_rep, n_gen = n_gen, burnin = burnin)
                sampled_rates_df = sampled_rates_df.append(pd.DataFrame(data=np.matrix(['%s->%s'%(status_a,status_b)]+list(rates)),columns = ['status_change']+ ['rate_%i'%i for i in np.arange(0,n_rep)]),ignore_index=True)
    sampled_rates_df[['rate_%i'%i for i in np.arange(0,n_rep)]] = sampled_rates_df[['rate_%i'%i for i in np.arange(0,n_rep)]].apply(pd.to_numeric)
    sampled_rates_df.to_csv(os.path.join(outdir,'sampled_status_change_rates.txt'),sep='\t',index=False,float_format='%.8f')
    print('Sampled %i rates from MCMC posterior for each transition type.'%n_rep)
    #__________________________________________________________________________
    # if mode 0, calculate extinction probabilities for EN and CR with GL data_________________________
    if extinction_probs_mode == 0:
        # calculate yearly extinction risks for categories EN and CR
        if gl_data_available:
            dims = gl_matrix.shape[1]
            en_risks = []
            for gl_array in gl_matrix:
                if dims == 1:
                    gl_array = np.array(gl_array)
                #replace all nan values with the standard en extinction risk
                en_risks_species = p_e_year(np.minimum(np.maximum([20]*len(gl_array),5*gl_array),100),0.2)
                # NaN != NaN, so this masks NaN entries.
                n_nan = len(en_risks_species[en_risks_species!=en_risks_species])
                en_risks_species[en_risks_species!=en_risks_species] = [p_e_year(20,0.2)]*n_nan
                en_risks.append(en_risks_species)
            en_risks = np.array(en_risks)
        else:
            print('Warning: No generation length (GL) data found. Extinction risks for status EN and CR are calculated without using GL data.')
            dims = 1
            en_risks = np.array([[p_e_year(20,0.2)]]*len(species_list))
        en_risks_df = make_empty_rate_df(species_list,dims,'EN')
        en_risks_df.iloc[:,1:] = en_risks
        en_risks_df.to_csv(os.path.join(outdir,'en_extinction_risks_all_species.txt'),sep='\t',index=False, float_format='%.12f')
        if gl_data_available:
            dims = gl_matrix.shape[1]
            cr_risks = []
            for gl_array in gl_matrix:
                if dims == 1:
                    gl_array = np.array(gl_array)
                #replace all nan values with the standard en extinction risk
                cr_risks_species = p_e_year(np.minimum(np.maximum([10]*len(gl_array),3*gl_array),100),0.5)
                n_nan = len(cr_risks_species[cr_risks_species!=cr_risks_species])
                cr_risks_species[cr_risks_species!=cr_risks_species] = [p_e_year(10,0.5)]*n_nan
                cr_risks.append(cr_risks_species)
            cr_risks = np.array(cr_risks)
        else:
            dims = 1
            cr_risks = np.array([[p_e_year(10,0.5)]]*len(species_list))
        cr_risks_df = make_empty_rate_df(species_list,dims,'CR')
        cr_risks_df.iloc[:,1:] = cr_risks
        cr_risks_df.to_csv(os.path.join(outdir,'cr_extinction_risks_all_species.txt'),sep='\t',index=False, float_format='%.12f')
        if args.species_specific_regression:
            # make regression for all other categories based on EN and CR risks
            print('Fitting species-specific regression function to determine LC, NT, and VU extinction probabilities ...')
            vu_risks_df = make_empty_rate_df(species_list,dims,'VU')
            nt_risks_df = make_empty_rate_df(species_list,dims,'NT')
            lc_risks_df = make_empty_rate_df(species_list,dims,'LC')
            for i,species in enumerate(cr_risks_df.species.values):
                en_risks = en_risks_df.iloc[i,1:].values
                cr_risks = cr_risks_df.iloc[i,1:].values
                vu_risks = []
                nt_risks = []
                lc_risks = []
                for j,_ in enumerate(en_risks):
                    en_prob = en_risks[j]
                    cr_prob = cr_risks[j]
                    # Statuses are coded 1..5 (LC..CR); EN=4 and CR=5 anchor the fit.
                    x = [4.,5.]
                    y = [en_prob,cr_prob]
                    # fitting the power function to the 2 data points of each species (EN and CR risk)
                    with warnings.catch_warnings():
                        # this is to avoid printing the warning from curve_fit when trying to fit function to only 2 points: "OptimizeWarning: Covariance of the parameters could not be estimated"
                        warnings.filterwarnings("ignore")
                        a_b = curve_fit(power_function,x,y);
                    # extracting the values for a and b from the curve fit function
                    a = a_b[0][0]
                    b = a_b[0][1]
                    # get values for LC, NT, and VU
                    p_year_LC = power_function(1,a,b)
                    p_year_NT = power_function(2,a,b)
                    p_year_VU = power_function(3,a,b)
                    vu_risks.append(p_year_VU)
                    nt_risks.append(p_year_NT)
                    lc_risks.append(p_year_LC)
                vu_risks_df.iloc[vu_risks_df[vu_risks_df.species == species].index.values[0],1:] = np.array(vu_risks)
                nt_risks_df.iloc[nt_risks_df[nt_risks_df.species == species].index.values[0],1:] = np.array(nt_risks)
                lc_risks_df.iloc[lc_risks_df[lc_risks_df.species == species].index.values[0],1:] = np.array(lc_risks)
            vu_risks_df.to_csv(os.path.join(outdir,'vu_extinction_risks_all_species.txt'),sep='\t',index=False, float_format='%.12f')
            nt_risks_df.to_csv(os.path.join(outdir,'nt_extinction_risks_all_species.txt'),sep='\t',index=False, float_format='%.12f')
            lc_risks_df.to_csv(os.path.join(outdir,'lc_extinction_risks_all_species.txt'),sep='\t',index=False, float_format='%.12f')
    #__________________________________________________________________________
    # populate q-matrices______________________________________________________
    print("\nPopulating species-specific q-matrices ...")
    sampled_rates_df.index = sampled_rates_df.status_change.values
    if extinction_probs_mode == 0:
        transition_rates = sampled_rates_df.iloc[:,1:]
        # randomely sample cr and en extinction probs to be used in q-matrices.
        if n_rep <= dims:
            sample_columns = np.random.choice(np.arange(dims),size=n_rep,replace=False)
        # since there are only as many cr and en p(ex) estimates as there are provided GL values, we may have to resample some (but make sure all are present at least once)
        else:
            sample_columns1 = np.random.choice(np.arange(dims),size=dims,replace=False)
            sample_columns2 = np.random.choice(np.arange(dims),size=(n_rep-dims),replace=True)
            sample_columns = np.concatenate([sample_columns1,sample_columns2])
        # get the corresponding en and cr ex-risk columns
        cr_risks_selection = cr_risks_df.iloc[:,1:].values[:,sample_columns]
        en_risks_selection = en_risks_df.iloc[:,1:].values[:,sample_columns]
        # smae for the vu, nt, and lc cats if species_specific_regression is activated
        if args.species_specific_regression:
            vu_risks_selection = vu_risks_df.iloc[:,1:].values[:,sample_columns]
            nt_risks_selection = nt_risks_df.iloc[:,1:].values[:,sample_columns]
            lc_risks_selection = lc_risks_df.iloc[:,1:].values[:,sample_columns]
    elif extinction_probs_mode == 1:
        target_keys = [i for i in sampled_rates_df.status_change.values if i[-2:] == 'EX']
        # NOTE(review): iloc[:-1] drops the last '->EX' row from the extinction
        # probabilities, and iloc[:30] keeps the 6x5=30 transitions among the
        # non-EX statuses — confirm both slices against the intended layout.
        ex_probs = sampled_rates_df[sampled_rates_df.status_change.isin(target_keys)].iloc[:-1,1:].values.T
        transition_rates = sampled_rates_df[~sampled_rates_df.status_change.isin(target_keys)].iloc[:30,1:]
    for i in np.arange(n_rep):
        rates_i = transition_rates.iloc[:,i]
        sys.stdout.write('\rProgress: %i %%'%int(((i+1)/n_rep)*100))
        # for each rep (i), create list of q-matrices, 1 for each species
        if extinction_probs_mode == 0:
            cr_risks_rep = cr_risks_selection[:,i]
            en_risks_rep = en_risks_selection[:,i]
            if args.species_specific_regression:
                vu_risks_rep = vu_risks_selection[:,i]
                nt_risks_rep = nt_risks_selection[:,i]
                lc_risks_rep = lc_risks_selection[:,i]
            q_matrix_list_i = []
            for j,__ in enumerate(species_list):
                en_risk = en_risks_rep[j]
                cr_risk = cr_risks_rep[j]
                if args.species_specific_regression:
                    lc_nt_vu = [lc_risks_rep[j],nt_risks_rep[j],vu_risks_rep[j]]
                else:
                    lc_nt_vu = [0.000000155728,0.000041551152,0.001053050310]
                status_specific_p_e = np.array(lc_nt_vu+[en_risk,cr_risk]) # These values are the category specific probabilities of extinction per year calculated from IUCN definition of each category
                q_matrix = cust_func.qmatrix(rates_i, status_specific_p_e)
                q_matrix_list_i.append([q_matrix])
        elif extinction_probs_mode == 1:
            q_matrix_list_i = []
            status_specific_p_e = ex_probs[i]
            # In empirical mode every species shares the same q-matrix for this rep.
            q_matrix = cust_func.qmatrix(rates_i, status_specific_p_e)
            q_matrix_list_i = []
            for spec in species_list:
                q_matrix_list_i.append([q_matrix])
        q_matrix_list_i_copy = q_matrix_list_i.copy()
        if i == 0:
            qmatrix_list_dict = dict(zip(list(species_list),q_matrix_list_i_copy)).copy()
        else:
            # Appends in place; the resulting list of Nones is unused.
            update_dict = [qmatrix_list_dict[species].append(q_matrix_list_i_copy[i][0]) for i, species in enumerate(list(species_list))]
    print('\n')
    #__________________________________________________________________________
    # get transition rates for DD______________________________________________
    dd_changes = []
    dd_rates = []
    for row_id,change_type in enumerate(transition_rates.index.values):
        states = change_type.split('->')
        if states[0] == 'DD':
            dd_changes.append('-'.join(states))
            rates = transition_rates[transition_rates.index==change_type].values
            dd_rates.append(rates[0])
    # Normalize outgoing DD rates into per-transition probabilities.
    dd_probs = dd_rates/sum(np.array(dd_rates))
    #__________________________________________________________________________
    # Finally write all the compiled info to a pickle file_____________________
    species_specific_data = [[species,current_status[i],qmatrix_list_dict[species]]for i,species in enumerate(species_list)]
    final_output_data = [species_specific_data,dd_probs]
    cust_func.save_obj(final_output_data,os.path.join(outdir,'simulation_input_data.pkl'))
#__________________________________________________________________________
| StarcoderdataPython |
391418 | <gh_stars>10-100
import re
import os
class CommandHandler(object):
    """Base class wrapping a shell command invocation and its captured output.

    Subclasses override the handle_* hooks to extract structured metadata;
    the base implementations all return empty dicts.
    """

    def __init__(self, command_tokens, stdout, stderr):
        self.cmd_tokens = command_tokens
        self.cmd_str = " ".join(command_tokens)
        # Keep only non-blank, whitespace-trimmed lines of each stream.
        self.stdout = self._clean(stdout)
        self.stderr = self._clean(stderr)

    @staticmethod
    def _clean(stream_text):
        stripped = (line.strip() for line in stream_text.split("\n"))
        return [line for line in stripped if line]

    def handle_stderr(self):
        """Extract metadata from stderr; nothing by default."""
        return {}

    def handle_stdout(self):
        """Extract metadata from stdout; nothing by default."""
        return {}

    def handle_command(self):
        """Extract metadata from the command line itself; nothing by default."""
        return {}

    def get_version(self):
        """Report the tool version; unknown by default."""
        return {}
class FiletypeHandler(object):
    """Base interface for per-filetype handlers.

    Subclasses override check_integrity() (mapping of
    (check_name, failure_message) -> bool) and make_metadata()
    (metadata dict for the file).
    """

    def __init__(self, path):
        # Path of the file this handler inspects.
        self.path = path

    def check_integrity(self):
        """Run integrity checks; the base class checks nothing."""
        return {}

    def make_metadata(self):
        """Extract metadata; the base class extracts nothing."""
        return {}
################################################################################
class FindCommandHandler(CommandHandler):
    """Extract structured information from a `find` invocation and its output."""

    def handle_command(self):
        """Return {'name': <pattern>} for the -name argument, or {} if absent.

        Bug fix: the original pattern '.* -name (.*)$|\\s.*' let its second
        alternative match commands without -name, yielding {'name': None};
        and the bare `except:` swallowed every exception. Match only the
        -name flag and return {} explicitly when it is missing.
        """
        s = re.search(r'.* -name (.*)$', self.cmd_str, re.M | re.I)
        if s is None:
            return {}
        return {
            "name": s.group(1)
        }

    def handle_stdout(self):
        """Summarize `find` output: result count and a histogram of basename lengths."""
        results = 0
        lengths = {}
        for line in self.stdout:
            # self.stdout is pre-stripped by CommandHandler, but stay defensive.
            if len(line) == 0:
                continue
            results += 1
            # Last path component is the matched file's basename.
            fields = line.split(os.path.sep)
            last = fields[-1]
            if len(last) not in lengths:
                lengths[len(last)] = 0
            lengths[len(last)] += 1
        return {
            "results": results,
            "lengths": lengths,
        }
class BowtieCommandHandler(CommandHandler):
    """Parse bowtie invocations: label file arguments (with content hashes) and read the alignment rate."""

    def handle_command(self):
        """Map known bowtie flags to 'path (hash)' entries; everything else goes to 'leftover'."""
        import util
        import glob
        # Flags whose following token is a file (or index prefix) we record.
        interesting = {
            "-1": "reads1",
            "-2": "reads2",
            "-x": "btindex",
            "--un": "unaligned",
            "-S": "out",
            "-U": "reads",
        }
        meta = {"leftover": []}
        skip = False
        fields = self.cmd_tokens
        for field_i, field in enumerate(fields):
            if skip:
                # This token was already consumed as the previous flag's value.
                skip = False
                continue
            if field in interesting:
                try:
                    if field == "-x":
                        # A bowtie index is a family of files sharing the prefix.
                        h = util.hashfiles(glob.glob(fields[field_i + 1] + "*"))
                    else:
                        #h = util.get_file_record(fields[field_i + 1])["digest"]
                        h = util.hashfile(fields[field_i + 1])
                except:
                    pass
                    # NOTE(review): the original indentation is ambiguous here —
                    # if `h = 0` sat one level further out it would
                    # unconditionally clobber the hash computed above; it is
                    # placed as the failure fallback here. Confirm against the
                    # original file.
                    h = 0
                meta[interesting[field]] = "%s (%s)" % (fields[field_i + 1], h)
                skip = True
                continue
            meta["leftover"].append(field)
        return meta

    def handle_stderr(self):
        """Return {'alignment': <overall alignment %>} parsed from the last stderr line, or {} when stderr is empty."""
        try:
            return {
                "alignment": float(self.stderr[-1].split("%")[0].strip())
            }
        except IndexError:
            return {}
################################################################################
class BamFileHandler(FiletypeHandler):
    """Integrity checks and metadata for BAM files (read counts via samtools)."""

    def check_integrity(self):
        """Verify the BAM has reads and an up-to-date .bai index."""
        from subprocess import check_output
        reads = 0
        try:
            p = check_output("samtools view -c %s" % self.path, shell=True)
            # NOTE(review): on Python 3 check_output returns bytes, so this
            # split("\n") raises and `reads` stays 0 — decoding would be
            # needed there; confirm which interpreter this runs under.
            reads = int(p.split("\n")[0].strip())
        except Exception as e:
            pass
        has_index = False
        has_indate_index = None  # stays None when there is no index at all
        if os.path.exists(self.path + ".bai"):
            has_index = True
            # The index must be at least as new as the BAM itself.
            if os.path.getmtime(self.path) <= os.path.getmtime(self.path + ".bai"):
                has_indate_index = True
            else:
                has_indate_index = False
        return {
            ("has_reads", "has 0 reads"): reads > 0,
            ("has_index", "has no BAI"): has_index,
            ("has_indate_index", "has a BAI older than itself"): has_indate_index,
        }

    def make_metadata(self):
        """Return {'read_n': <count>} from samtools, or {} if the call fails."""
        from subprocess import check_output
        try:
            p = check_output("samtools view -c %s" % self.path, shell=True)
            return {"read_n": p.split("\n")[0].strip()}
        except:
            return {}
class VcfFileHandler(FiletypeHandler):
    """Integrity checks and metadata for VCF files (variant counts via grep)."""

    def check_integrity(self):
        """Verify the VCF contains at least one non-header (variant) line."""
        from subprocess import check_output
        variants = 0
        try:
            # universal_newlines=True makes check_output return text on both
            # Python 2 and 3 (it returns bytes on 3, which would break the
            # split below).
            p = check_output("grep -vc '^#' %s" % self.path, shell=True,
                             universal_newlines=True)
            # Bug fix: the count was previously kept as a string, so the
            # `variants > 0` comparison below compared str to int (always True
            # on Python 2, TypeError on Python 3); cast to int as
            # BamFileHandler does.
            variants = int(p.split("\n")[0].strip())
        except Exception as e:
            # grep exits non-zero when the count is 0; treat failures as no variants.
            pass
        return {
            ("has_variants", "has 0 variants"): variants > 0,
        }

    def make_metadata(self):
        """Return {'snp_n': <variant count>} from grep, or {} if the call fails."""
        from subprocess import check_output
        try:
            p = check_output("grep -vc '^#' %s" % self.path, shell=True,
                             universal_newlines=True)
            return {"snp_n": p.split("\n")[0].strip()}
        except Exception:
            return {}
class FastaFileHandler(FiletypeHandler):
    """Integrity checks and metadata for FASTA files."""

    def check_integrity(self):
        """A FASTA file passes if it is non-empty."""
        size = os.path.getsize(self.path)
        return {("not_empty", "is empty"): size > 0}

    def make_metadata(self):
        """Return {'read_n': <number of '>' header lines>} or {} on failure."""
        from subprocess import check_output
        try:
            counted = check_output("grep -c '^>' %s" % self.path, shell=True)
            return {"read_n": counted.split("\n")[0].strip()}
        except:
            return {}
class FastqFileHandler(FiletypeHandler):
    """Integrity checks for FASTQ files; no metadata is extracted."""

    def check_integrity(self):
        """A FASTQ file passes if it is non-empty."""
        file_size = os.path.getsize(self.path)
        return {("not_empty", "is empty"): file_size > 0}

    def make_metadata(self):
        """No metadata is collected for FASTQ files."""
        return {}
class ErrFileHandler(FiletypeHandler):
    """Integrity checks for .err files: a healthy stderr capture is empty."""

    def check_integrity(self):
        """An .err file passes only when it contains no output at all."""
        return {("empty", "is not empty"): os.path.getsize(self.path) == 0}

    def make_metadata(self):
        """No metadata is collected for .err files."""
        return {}
| StarcoderdataPython |
1728067 | import json
import logging
import requests
from functools import reduce
from urllib.parse import urljoin
from flask import Blueprint, render_template, flash, redirect, url_for, current_app
from app import db
from app.planet.models import Planet
# Blueprint grouping all planet-related routes.
planet_bp = Blueprint('planet', __name__)


@planet_bp.route('/')
def index():
    """Render the planet landing page."""
    return render_template('./planet/index.html')
@planet_bp.route('/init-db/')
def populate_db():
    """Reset the schema and load every SWAPI planet into the database.

    Stops at the first save failure, flashing a (Portuguese) error message;
    otherwise flashes a success message. Always redirects to the index.
    """
    initialize_db()
    error_message = None
    urls = get_all_urls('http://swapi.dev/api/planets/')
    for url in urls:
        response = requests.get(url)
        planet = response.json()
        try:
            # SWAPI reports unknown populations as the string 'unknown'.
            planet['population'] = None if planet['population'] == 'unknown' else planet['population']
            new_planet = Planet(name=planet['name'],
                                rotation_period=planet['rotation_period'],
                                orbital_period=planet['orbital_period'],
                                diameter=planet['diameter'],
                                climate=planet['climate'],
                                gravity=planet['gravity'],
                                terrain=planet['terrain'],
                                surface_water=planet['surface_water'],
                                population=planet['population'])
            db.session.add(new_planet)
            db.session.commit()
        except Exception as e:
            logging.info(e)
            error_message = "Erro ao salvar o planeta {name}. Erro: {planet}".format(name=planet['name'], planet=planet)
            flash(error_message)
            break
    if not error_message:
        flash("Planetas salvos com sucesso.")
    return redirect(url_for("index"))
@planet_bp.route('/planets/')
def get_planets():
    """Fetch the planet list from the app's own API and render it.

    On any request error, flashes a message and redirects to the index.
    """
    base_url = current_app.config['BASE_URL']
    try:
        url = reduce(urljoin, [base_url, "/api/planets/"])
        response = requests.get(url)
    except Exception as e:
        flash("Erro ao listar os planetas. Erro: {}".format(e))
        return redirect(url_for("index"))
    else:
        # NOTE(review): the API is expected to embed 'status_code', 'response'
        # and 'message' in its JSON body — confirm against the API handler.
        if response.json()['status_code'] == 200:
            planets = response.json()
            return render_template('./planet/planets.html', planets=json.dumps(planets['response']['results']))
        else:
            flash(response.json()['message'])
            return redirect(url_for("index"))
def initialize_db():
    """Wipe the database: drop every table, then recreate the current schema."""
    db.drop_all()
    db.create_all()
def get_all_urls(url):
    """Collect resource URLs from every page of a paginated SWAPI listing.

    Follows the 'next' link from page to page until the API reports no
    further page, returning the accumulated list of per-resource URLs.
    """
    urls = []
    next_page = url
    while next_page:
        page = json.loads(requests.get(next_page).content)
        urls.extend(item['url'] for item in page['results'])
        # The API sets 'next' to null (falsy) on the last page.
        next_page = page['next'] if page['next'] else None
    return urls
| StarcoderdataPython |
3398110 | import argparse
import os
import os.path as osp
from shutil import copyfile
import mmcv
from tqdm import tqdm
def parse_args():
    """Build and evaluate the command-line options for this script."""
    parser = argparse.ArgumentParser()
    # Each option falls back to the default dataset layout under /data.
    for flag, default in (
        ("--annotation_path", "/data/coco_train.json"),
        ("--image_root", "/data/train"),
        ("--output_root", "/data/sources"),
    ):
        parser.add_argument(flag, default=default)
    return parser.parse_args()
def main():
    """Copy every annotated image into a per-source directory tree."""
    args = parse_args()
    annotations = mmcv.load(args.annotation_path)
    for sample in tqdm(annotations["images"]):
        # Group images by their originating dataset ("source" field).
        destination_dir = osp.join(args.output_root, sample["source"])
        os.makedirs(destination_dir, exist_ok=True)
        src_path = osp.join(args.image_root, sample["file_name"])
        dst_path = osp.join(destination_dir, sample["file_name"])
        copyfile(src_path, dst_path)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1781532 | import math
# Tutorial script demonstrating Python numeric types, conversions,
# arithmetic operators, built-in numeric functions and the math module.
x = 0.112861
print(type(x)) # float
# a complex number involves the square root of -1 (the imaginary unit)
# complex numbers are written with a "j" suffix
y = 10+5j
print(type(y))
print(y)
# type conversion (changing a value's data type)
# convert from int to float
a = 10; # a is an integer
b = float(a) # converting the int into a float
print(b)
# convert from float to int
pi = 3.14 # floating point number
c = int(pi) # we are converting
print(c) # output is 3 because it removes decimal numbers
# convert from int to complex
d = complex(a) # we can convert from int to complex but we can`t convert from complex to int!
print(d)
# converting from string to int
num = "15" # data type is string
print(type(num))
num = int(num) # We are converting from string to int
print(type(num)) # data type is int>
# Arithmetic operators
"""
+ (qo`shish) x+y
- (Ayrish) x-y
* (ko`paytirish) x*y
/ (bo`lish) x/y bo`lganda kasr qismi bilan chiqarib beradi
% (modulus) ikki sonni bo`lganda qoldiqni topish x%y
// (qoldiqsiz bo`lish) faqat butun qismini chiqarib beradi x//y
** (Darajaga oshirish) x**y
"""
x = 12
y = 7
print(x/y)
print(x//y)
print(x%y)
print(x**y)
# built-in functions
# round(x, y) rounds 'x' to 'y' digits after the decimal point
num = 3.1415973123
print(round(num, 2)) # result is 3.14: only two digits are kept after the decimal point
num = 136.213
print(round(num, -2)) # result is 100: a negative ndigits rounds to the left of the decimal point
# abs(x) returns the absolute (non-negative) value of 'x'
son = -13
print(abs(son)) # result is 13
# max(x,y) returns the larger of 'x' and 'y'
x = 24.5
y = 13.7
print(max(x,y)) # result is 24.5
# min(x,y) returns the smaller of 'x' and 'y'
print(min(x,y)) # result is 13.7
# pow(x, y) returns 'x' raised to the power 'y'
x = 3
y = 4
print(pow(x,y)) # result is '81' because 3*3*3*3=81
# Math library
"""
Biz math libraryni pycharmga qo`shish uchun "import math" ni kiritishimiz kk!
ceil(x) 'x'ning tepadan eng yaqin qiymatini qaytaradi!
floor(x) 'x' ning pastdan eng kichik qiymatini qaytaradi!
gcd(x,y) 'x' va 'y'ning EKUBini qaytarib beradi!
exp(x) Yevklit sonininig x-darajasini qaytaradi!
prod(x,y) x va y ning ko`paytmasini qaytaradi!
remainder(x,y) 'x' ni 'y' ga bo`lganda qoldig`ini qaytaradi!
pow(x,y) 'x' ning 'y'-darajasini qaytaradi
sqrt(x) 'x' ninig kvadrat ildizini qaytaradi
log(x[,base]) x ning natural logarifmni qaytaradi, base orqali asosini o`zgartirish mumkin
log2(x) 'x' ning 2 asosli logarifmini hisoblaydi
log10(x) 'x' ning 10- asosli logarifmini hisoblaydi
"""
print(math.sqrt(16))
print(math.log(27,3)) # which power of 3 gives 27
print(math.floor(14.99999))
print(math.gcd(15,31))
print(math.e**2)
print(math.exp(2))
# Trigonometric functions
"""
sin(x) 'x' ning radian qiymatidagi sinusini aniqlaydi
cos(x) 'x' ning radian qiymatidagi kosinusini aniqlaydi
tan(x) 'x' ning radian qiymatidagi taginusini aniqlaydi
asin(x) 'x' ning radian qiymatidagi arksinusini aniqlaydi
degrees(x) "x"ni radiandan gradusga o`tkazadi
radians(x) "x" gradusdan radianga o`tkazadi
"""
| StarcoderdataPython |
11329710 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2014 The python-semanticversion project
# This code is distributed under the two-clause BSD License.
"""Test the various functions from 'base'."""
from .compat import unittest, is_python2
from semantic_version import base
class ComparisonTestCase(unittest.TestCase):
    """Exercise the low-level identifier comparison helpers in `base`."""

    def test_identifier_cmp(self):
        # Each case is (lhs, rhs, expected sign of the comparison).
        cases = [
            # Integers
            ('1', '1', 0),
            ('1', '2', -1),
            ('11', '2', 1),
            ('3333', '40', 1),
            # Text
            ('aa', 'ab', -1),
            ('aa', 'aa', 0),
            ('ab', 'aa', 1),
            ('aaa', 'ab', -1),
            # Mixed
            ('10', '1a', -1),
            ('1a', '10', 1),
            ('ab1', '42', 1),
        ]
        for a, b, expected in cases:
            result = base.identifier_cmp(a, b)
            self.assertEqual(expected, result,
                "identifier_cmp(%r, %r) returned %d instead of %d" % (
                    a, b, result, expected))

    def test_identifier_list_cmp(self):
        # Dotted identifier lists compare element-wise, then by length.
        cases = [
            # Same length
            (['1', '2', '3'], ['1', '2', '3'], 0),
            (['1', '2', '3'], ['1', '3', '2'], -1),
            (['1', '2', '4'], ['1', '2', '3'], 1),
            # Mixed lengths
            (['1', 'a'], ['1', 'a', '0'], -1),
            (['1', 'a', '0'], ['1', 'a'], 1),
            (['1', 'b'], ['1', 'a', '1000'], 1),
        ]
        for a, b, expected in cases:
            result = base.identifier_list_cmp(a, b)
            self.assertEqual(expected, result,
                "identifier_list_cmp(%r, %r) returned %d instead of %d" % (
                    a, b, result, expected))
class TopLevelTestCase(unittest.TestCase):
    """Test module-level functions."""

    # (a, b, expected sign) triples for base.compare().
    versions = (
        ('0.1.0', '0.1.1', -1),
        ('0.1.1', '0.1.1', 0),
        ('0.1.1', '0.1.0', 1),
        ('0.1.0-alpha', '0.1.0', -1),
        ('0.1.0-alpha+2', '0.1.0-alpha', 1),
    )

    def test_compare(self):
        for a, b, expected in self.versions:
            result = base.compare(a, b)
            self.assertEqual(expected, result,
                "compare(%r, %r) should be %r instead of %r" % (a, b, expected, result))

    # (spec, version) pairs the spec must accept.
    matches = (
        ('>=0.1.1', '0.1.2'),
        ('>=0.1.1', '0.1.1'),
        ('>=0.1.1', '0.1.1-alpha'),
        ('>=0.1.1,!=0.2.0', '0.2.1'),
    )

    def test_match(self):
        for spec, version in self.matches:
            self.assertTrue(base.match(spec, version),
                "%r should accept %r" % (spec, version))

    # Version strings base.validate() must accept.
    valid_strings = (
        '1.0.0-alpha',
        '1.0.0-alpha.1',
        '1.0.0-beta.2',
        '1.0.0-beta.11',
        '1.0.0-rc.1',
        '1.0.0-rc.1+build.1',
        '1.0.0',
        '1.0.0+0.3.7',
        '1.3.7+build',
        '1.3.7+build.2.b8f12d7',
        '1.3.7+build.11.e0f985a',
        '1.1.1',
        '1.1.2',
        '1.1.3-rc4.5',
        '1.1.3-rc42.3-14-15.24+build.2012-04-13.223',
        '1.1.3+build.2012-04-13.HUY.alpha-12.1',
    )

    def test_validate_valid(self):
        for version in self.valid_strings:
            self.assertTrue(base.validate(version),
                "%r should be a valid version" % (version,))

    # Version strings base.validate() must reject.
    invalid_strings = (
        '1',
        'v1',
        '1.2.3.4',
        '1.2',
        '1.2a3',
        '1.2.3a4',
        'v12.34.5',
        '1.2.3+4+5',
    )

    def test_validate_invalid(self):
        for version in self.invalid_strings:
            self.assertFalse(base.validate(version),
                "%r should not be a valid version" % (version,))
class VersionTestCase(unittest.TestCase):
    """Tests for base.Version parsing, printing, equality and hashing."""

    # Map of version string -> (major, minor, patch, prerelease, build).
    versions = {
        '1.0.0-alpha': (1, 0, 0, ('alpha',), ()),
        '1.0.0-alpha.1': (1, 0, 0, ('alpha', '1'), ()),
        '1.0.0-beta.2': (1, 0, 0, ('beta', '2'), ()),
        '1.0.0-beta.11': (1, 0, 0, ('beta', '11'), ()),
        '1.0.0-rc.1': (1, 0, 0, ('rc', '1'), ()),
        '1.0.0-rc.1+build.1': (1, 0, 0, ('rc', '1'), ('build', '1')),
        '1.0.0': (1, 0, 0, (), ()),
        '1.0.0+0.3.7': (1, 0, 0, (), ('0', '3', '7')),
        '1.3.7+build': (1, 3, 7, (), ('build',)),
        '1.3.7+build.2.b8f12d7': (1, 3, 7, (), ('build', '2', 'b8f12d7')),
        '1.3.7+build.11.e0f985a': (1, 3, 7, (), ('build', '11', 'e0f985a')),
        '1.1.1': (1, 1, 1, (), ()),
        '1.1.2': (1, 1, 2, (), ()),
        '1.1.3-rc4.5': (1, 1, 3, ('rc4', '5'), ()),
        '1.1.3-rc42.3-14-15.24+build.2012-04-13.223':
            (1, 1, 3, ('rc42', '3-14-15', '24'), ('build', '2012-04-13', '223')),
        '1.1.3+build.2012-04-13.HUY.alpha-12.1':
            (1, 1, 3, (), ('build', '2012-04-13', 'HUY', 'alpha-12', '1')),
    }

    def test_parsing(self):
        for text, expected_fields in self.versions.items():
            version = base.Version(text)
            actual_fields = (version.major, version.minor, version.patch,
                version.prerelease, version.build)
            self.assertEqual(expected_fields, actual_fields)

    def test_str(self):
        # str() must round-trip the original text; repr() wraps it.
        for text in self.versions:
            version = base.Version(text)
            self.assertEqual(text, str(version))
            self.assertEqual("Version('%s')" % text, repr(version))

    def test_compare_to_self(self):
        # A Version equals another Version, never the raw string.
        for text in self.versions:
            self.assertEqual(base.Version(text), base.Version(text))
            self.assertNotEqual(text, base.Version(text))

    # Same mapping for partial=True versions; None marks absent components.
    partial_versions = {
        '1.1': (1, 1, None, None, None),
        '2': (2, None, None, None, None),
        '1.0.0-alpha': (1, 0, 0, ('alpha',), None),
        '1.0.0-alpha.1': (1, 0, 0, ('alpha', '1'), None),
        '1.0.0-beta.2': (1, 0, 0, ('beta', '2'), None),
        '1.0.0-beta.11': (1, 0, 0, ('beta', '11'), None),
        '1.0.0-rc.1': (1, 0, 0, ('rc', '1'), None),
        '1.0.0': (1, 0, 0, None, None),
        '1.1.1': (1, 1, 1, None, None),
        '1.1.2': (1, 1, 2, None, None),
        '1.1.3-rc4.5': (1, 1, 3, ('rc4', '5'), None),
        '1.0.0-': (1, 0, 0, (), None),
        '1.0.0+': (1, 0, 0, (), ()),
        '1.0.0-rc.1+build.1': (1, 0, 0, ('rc', '1'), ('build', '1')),
        '1.0.0+0.3.7': (1, 0, 0, (), ('0', '3', '7')),
        '1.3.7+build': (1, 3, 7, (), ('build',)),
        '1.3.7+build.2.b8f12d7': (1, 3, 7, (), ('build', '2', 'b8f12d7')),
        '1.3.7+build.11.e0f985a': (1, 3, 7, (), ('build', '11', 'e0f985a')),
        '1.1.3-rc42.3-14-15.24+build.2012-04-13.223':
            (1, 1, 3, ('rc42', '3-14-15', '24'), ('build', '2012-04-13', '223')),
        '1.1.3+build.2012-04-13.HUY.alpha-12.1':
            (1, 1, 3, (), ('build', '2012-04-13', 'HUY', 'alpha-12', '1')),
    }

    def test_parsing_partials(self):
        for text, expected_fields in self.partial_versions.items():
            version = base.Version(text, partial=True)
            actual_fields = (version.major, version.minor, version.patch,
                version.prerelease, version.build)
            self.assertEqual(expected_fields, actual_fields)
            self.assertTrue(version.partial, "%r should have partial=True" % version)

    def test_str_partials(self):
        for text in self.partial_versions:
            version = base.Version(text, partial=True)
            self.assertEqual(text, str(version))
            self.assertEqual("Version('%s', partial=True)" % text, repr(version))

    def test_compare_partial_to_self(self):
        for text in self.partial_versions:
            self.assertEqual(
                base.Version(text, partial=True),
                base.Version(text, partial=True))
            self.assertNotEqual(text, base.Version(text, partial=True))

    def test_hash(self):
        self.assertEqual(1,
            len(set([base.Version('0.1.0'), base.Version('0.1.0')])))
        self.assertEqual(2,
            len(set([base.Version('0.1.0'), base.Version('0.1.0', partial=True)])))
        # A fully-defined 'partial' version isn't actually partial.
        self.assertEqual(1,
            len(set([
                base.Version('0.1.0-a1+34'),
                base.Version('0.1.0-a1+34', partial=True)
            ]))
        )

    @unittest.skipIf(is_python2, "Comparisons to other objects are broken in Py2.")
    def test_invalid_comparisons(self):
        # Ordering against non-Version objects must raise; ==/!= must not.
        v = base.Version('0.1.0')
        with self.assertRaises(TypeError):
            v < '0.1.0'
        with self.assertRaises(TypeError):
            v <= '0.1.0'
        with self.assertRaises(TypeError):
            v > '0.1.0'
        with self.assertRaises(TypeError):
            v >= '0.1.0'
        self.assertTrue(v != '0.1.0')
        self.assertFalse(v == '0.1.0')
class SpecItemTestCase(unittest.TestCase):
    """Tests for base.SpecItem: parsing single requirement expressions and
    matching versions against them."""

    # Map of spec string -> (kind, major, minor, patch, prerelease, build).
    components = {
        '==0.1.0': (base.SpecItem.KIND_EQUAL, 0, 1, 0, None, None),
        '==0.1.2-rc3': (base.SpecItem.KIND_EQUAL, 0, 1, 2, ('rc3',), None),
        '==0.1.2+build3.14': (base.SpecItem.KIND_EQUAL, 0, 1, 2, (), ('build3', '14')),
        '<=0.1.1+': (base.SpecItem.KIND_LTE, 0, 1, 1, (), ()),
        '<0.1.1': (base.SpecItem.KIND_LT, 0, 1, 1, None, None),
        '<=0.1.1': (base.SpecItem.KIND_LTE, 0, 1, 1, None, None),
        '<=0.1.1-': (base.SpecItem.KIND_LTE, 0, 1, 1, (), None),
        '>=0.2.3-rc2': (base.SpecItem.KIND_GTE, 0, 2, 3, ('rc2',), None),
        '>0.2.3-rc2+': (base.SpecItem.KIND_GT, 0, 2, 3, ('rc2',), ()),
        '>=2.0.0': (base.SpecItem.KIND_GTE, 2, 0, 0, None, None),
        '!=0.1.1+': (base.SpecItem.KIND_NEQ, 0, 1, 1, (), ()),
        '!=0.3.0': (base.SpecItem.KIND_NEQ, 0, 3, 0, None, None),
    }

    def test_components(self):
        for spec_text, components in self.components.items():
            kind, major, minor, patch, prerelease, build = components
            spec = base.SpecItem(spec_text)
            self.assertEqual(kind, spec.kind)
            self.assertEqual(major, spec.spec.major)
            self.assertEqual(minor, spec.spec.minor)
            self.assertEqual(patch, spec.spec.patch)
            self.assertEqual(prerelease, spec.spec.prerelease)
            self.assertEqual(build, spec.spec.build)
            self.assertNotEqual(spec, spec_text)
            self.assertEqual(spec_text, str(spec))

    # Map of spec string -> ([versions that match], [versions that don't]).
    # BUGFIX: the first '<=0.1.1' entry duplicated the key of the later one,
    # so its tables were silently discarded by the dict literal; it is the
    # '<=0.1.1+' (build-inclusive) variant, matching the upstream test suite.
    matches = {
        '==0.1.0': (
            ['0.1.0', '0.1.0-rc1', '0.1.0+build1', '0.1.0-rc1+build2'],
            ['0.0.1', '0.2.0', '0.1.1'],
        ),
        '==0.1.2-rc3': (
            ['0.1.2-rc3+build1', '0.1.2-rc3+build4.5'],
            ['0.1.2-rc4', '0.1.2', '0.1.3'],
        ),
        '==0.1.2+build3.14': (
            ['0.1.2+build3.14'],
            ['0.1.2-rc+build3.14', '0.1.2+build3.15'],
        ),
        '<=0.1.1+': (
            ['0.0.0', '0.1.1-alpha1', '0.1.1', '0.1.1+build2'],
            ['0.1.2'],
        ),
        '<0.1.1': (
            ['0.1.0', '0.0.0'],
            ['0.1.1', '0.1.1-zzz+999', '1.2.0', '0.1.1+build3'],
        ),
        '<=0.1.1': (
            ['0.1.1+build4', '0.1.1-alpha', '0.1.0'],
            ['0.2.3', '1.1.1', '0.1.2'],
        ),
        '<0.1.1-': (
            ['0.1.0', '0.1.1-alpha', '0.1.1-alpha+4'],
            ['0.2.0', '1.0.0', '0.1.1', '0.1.1+build1'],
        ),
        '>=0.2.3-rc2': (
            ['0.2.3-rc3', '0.2.3', '0.2.3+1', '0.2.3-rc2', '0.2.3-rc2+1'],
            ['0.2.3-rc1', '0.2.2'],
        ),
        '>0.2.3-rc2+': (
            ['0.2.3-rc3', '0.2.3', '0.2.3-rc2+1'],
            ['0.2.3-rc1', '0.2.2', '0.2.3-rc2'],
        ),
        '>2.0.0+': (
            ['2.1.1', '2.0.0+b1', '3.1.4'],
            ['1.9.9', '1.9.9999', '2.0.0', '2.0.0-rc4'],
        ),
        '!=0.1.1': (
            ['0.1.2', '0.1.0', '1.4.2'],
            ['0.1.1', '0.1.1-alpha', '0.1.1+b1'],
        ),
        '!=0.3.4-': (
            ['0.4.0', '1.3.0', '0.3.4-alpha', '0.3.4-alpha+b1'],
            ['0.3.4', '0.3.4+b1'],
        ),
    }

    def test_matches(self):
        for spec_text, versions in self.matches.items():
            spec = base.SpecItem(spec_text)
            matching, failing = versions
            for version_text in matching:
                version = base.Version(version_text)
                self.assertTrue(spec.match(version), "%r should match %r" % (version, spec))
            for version_text in failing:
                version = base.Version(version_text)
                self.assertFalse(spec.match(version),
                    "%r should not match %r" % (version, spec))

    def test_equality(self):
        # Equal to an equivalent SpecItem, never to the raw string.
        spec1 = base.SpecItem('==0.1.0')
        spec2 = base.SpecItem('==0.1.0')
        self.assertEqual(spec1, spec2)
        self.assertFalse(spec1 == '==0.1.0')

    def test_to_string(self):
        spec = base.SpecItem('==0.1.0')
        self.assertEqual('==0.1.0', str(spec))
        self.assertEqual(base.SpecItem.KIND_EQUAL, spec.kind)

    def test_hash(self):
        self.assertEqual(1,
            len(set([base.SpecItem('==0.1.0'), base.SpecItem('==0.1.0')])))
class CoerceTestCase(unittest.TestCase):
    """Tests for Version.coerce(), which normalises loose version strings."""

    examples = {
        # Dict of target: [list of equivalents]
        '0.0.0': ('0', '0.0', '0.0.0', '0.0.0+', '0-'),
        '0.1.0': ('0.1', '0.1+', '0.1-', '0.1.0'),
        '0.1.0+2': ('0.1.0+2', '0.1.0.2'),
        '0.1.0+2.3.4': ('0.1.0+2.3.4', '0.1.0+2+3+4', '0.1.0.2+3+4'),
        '0.1.0+2-3.4': ('0.1.0+2-3.4', '0.1.0+2-3+4', '0.1.0.2-3+4', '0.1.0.2_3+4'),
        '0.1.0-a2.3': ('0.1.0-a2.3', '0.1.0a2.3', '0.1.0_a2.3'),
        '0.1.0-a2.3+4.5-6': ('0.1.0-a2.3+4.5-6', '0.1.0a2.3+4.5-6', '0.1.0a2.3+4.5_6', '0.1.0a2.3+4+5/6'),
    }

    def test_coerce(self):
        # Every loose sample must coerce to its canonical target version.
        for equivalent, samples in self.examples.items():
            target = base.Version(equivalent)
            for sample in samples:
                v_sample = base.Version.coerce(sample)
                self.assertEqual(target, v_sample)

    def test_invalid(self):
        self.assertRaises(ValueError, base.Version.coerce, 'v1')
class SpecTestCase(unittest.TestCase):
    """Tests for base.Spec: comma-separated requirement lists, matching,
    filtering and selection."""

    # Spec list text -> expected individual spec strings.
    examples = {
        '>=0.1.1,<0.1.2': ['>=0.1.1', '<0.1.2'],
        '>=0.1.0,!=0.1.3-rc1,<0.1.3': ['>=0.1.0', '!=0.1.3-rc1', '<0.1.3'],
    }

    def test_parsing(self):
        for spec_list_text, specs in self.examples.items():
            spec_list = base.Spec(spec_list_text)
            self.assertEqual(spec_list_text, str(spec_list))
            self.assertNotEqual(spec_list_text, spec_list)
            self.assertEqual(specs, [str(spec) for spec in spec_list])
            for spec_text in specs:
                self.assertTrue(repr(base.SpecItem(spec_text)) in repr(spec_list))

    # Multiple constructor arguments -> same flattened spec strings.
    split_examples = {
        ('>=0.1.1', '<0.1.2', '!=0.1.1+build1'): ['>=0.1.1', '<0.1.2', '!=0.1.1+build1'],
        ('>=0.1.0', '!=0.1.3-rc1,<0.1.3'): ['>=0.1.0', '!=0.1.3-rc1', '<0.1.3'],
    }

    def test_parsing_split(self):
        for spec_list_texts, specs in self.split_examples.items():
            spec_list = base.Spec(*spec_list_texts)
            self.assertEqual(','.join(spec_list_texts), str(spec_list))
            self.assertEqual(specs, [str(spec) for spec in spec_list])
            self.assertEqual(spec_list, base.Spec(','.join(spec_list_texts)))
            for spec_text in specs:
                self.assertTrue(repr(base.SpecItem(spec_text)) in repr(spec_list))

    # Spec list text -> ([matching versions], [failing versions]).
    matches = {
        '>=0.1.1,<0.1.2': (
            ['0.1.1', '0.1.1+4', '0.1.1-alpha'],
            ['0.1.2-alpha', '0.1.2', '1.3.4'],
        ),
        '>=0.1.0+,!=0.1.3-rc1,<0.1.4': (
            ['0.1.1', '0.1.0+b4', '0.1.2', '0.1.3-rc2'],
            ['0.0.1', '0.1.4', '0.1.4-alpha', '0.1.3-rc1+4',
                '0.1.0-alpha', '0.2.2', '0.1.4-rc1'],
        ),
    }

    def test_matches(self):
        # Both the `in` operator and .match() must agree with the tables.
        for spec_list_text, versions in self.matches.items():
            spec_list = base.Spec(spec_list_text)
            matching, failing = versions
            for version_text in matching:
                version = base.Version(version_text)
                self.assertTrue(version in spec_list,
                    "%r should be in %r" % (version, spec_list))
                self.assertTrue(spec_list.match(version),
                    "%r should match %r" % (version, spec_list))
            for version_text in failing:
                version = base.Version(version_text)
                self.assertFalse(version in spec_list,
                    "%r should not be in %r" % (version, spec_list))
                self.assertFalse(spec_list.match(version),
                    "%r should not match %r" % (version, spec_list))

    def test_equality(self):
        for spec_list_text in self.examples:
            slist1 = base.Spec(spec_list_text)
            slist2 = base.Spec(spec_list_text)
            self.assertEqual(slist1, slist2)
            self.assertFalse(slist1 == spec_list_text)

    def test_filter_empty(self):
        s = base.Spec('>=0.1.1')
        res = tuple(s.filter(()))
        self.assertEqual((), res)

    def test_filter_incompatible(self):
        # No candidate satisfies the spec -> empty result.
        s = base.Spec('>=0.1.1,!=0.1.4')
        res = tuple(s.filter([
            base.Version('0.1.0'),
            base.Version('0.1.4'),
            base.Version('0.1.4-alpha'),
        ]))
        self.assertEqual((), res)

    def test_filter_compatible(self):
        # filter() keeps matching versions in their original order.
        s = base.Spec('>=0.1.1,!=0.1.4,<0.2.0')
        res = tuple(s.filter([
            base.Version('0.1.0'),
            base.Version('0.1.1'),
            base.Version('0.1.5'),
            base.Version('0.1.4-alpha'),
            base.Version('0.1.2'),
            base.Version('0.2.0-rc1'),
            base.Version('3.14.15'),
        ]))
        expected = (
            base.Version('0.1.1'),
            base.Version('0.1.5'),
            base.Version('0.1.2'),
        )
        self.assertEqual(expected, res)

    def test_select_empty(self):
        s = base.Spec('>=0.1.1')
        self.assertIsNone(s.select(()))

    def test_select_incompatible(self):
        s = base.Spec('>=0.1.1,!=0.1.4')
        res = s.select([
            base.Version('0.1.0'),
            base.Version('0.1.4'),
            base.Version('0.1.4-alpha'),
        ])
        self.assertIsNone(res)

    def test_select_compatible(self):
        # select() returns the highest matching version.
        s = base.Spec('>=0.1.1,!=0.1.4,<0.2.0')
        res = s.select([
            base.Version('0.1.0'),
            base.Version('0.1.1'),
            base.Version('0.1.5'),
            base.Version('0.1.4-alpha'),
            base.Version('0.1.2'),
            base.Version('0.2.0-rc1'),
            base.Version('3.14.15'),
        ])
        self.assertEqual(base.Version('0.1.5'), res)

    def test_contains(self):
        # Non-Version objects are never contained.
        self.assertFalse('ii' in base.Spec('>=0.1.1'))

    def test_hash(self):
        self.assertEqual(1,
            len(set([base.Spec('>=0.1.1'), base.Spec('>=0.1.1')])))
# Allow running this test module directly with `python test_base.py`.
if __name__ == '__main__':  # pragma: no cover
    unittest.main()
| StarcoderdataPython |
11344884 | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: docker_prune
short_description: Allows to prune various docker objects
description:
- Allows to run C(docker container prune), C(docker image prune), C(docker network prune)
and C(docker volume prune) via the Docker API.
options:
containers:
description:
- Whether to prune containers.
type: bool
default: no
containers_filters:
description:
- A dictionary of filter values used for selecting containers to delete.
- "For example, C(until: 24h)."
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
for more information on possible filters.
type: dict
images:
description:
- Whether to prune images.
type: bool
default: no
images_filters:
description:
- A dictionary of filter values used for selecting images to delete.
- "For example, C(dangling: true)."
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
for more information on possible filters.
type: dict
networks:
description:
- Whether to prune networks.
type: bool
default: no
networks_filters:
description:
- A dictionary of filter values used for selecting networks to delete.
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
for more information on possible filters.
type: dict
volumes:
description:
- Whether to prune volumes.
type: bool
default: no
volumes_filters:
description:
- A dictionary of filter values used for selecting volumes to delete.
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
for more information on possible filters.
type: dict
builder_cache:
description:
- Whether to prune the builder cache.
- Requires version 3.3.0 of the Docker SDK for Python or newer.
type: bool
default: no
extends_documentation_fragment:
- community.general.docker
- community.general.docker.docker_py_2_documentation
author:
- "<NAME> (@felixfontein)"
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
- "Docker API >= 1.25"
'''
EXAMPLES = '''
- name: Prune containers older than 24h
community.general.docker_prune:
containers: yes
containers_filters:
# only consider containers created more than 24 hours ago
until: 24h
- name: Prune everything
community.general.docker_prune:
containers: yes
images: yes
networks: yes
volumes: yes
builder_cache: yes
- name: Prune everything (including non-dangling images)
community.general.docker_prune:
containers: yes
images: yes
images_filters:
dangling: false
networks: yes
volumes: yes
builder_cache: yes
'''
RETURN = '''
# containers
containers:
description:
- List of IDs of deleted containers.
returned: I(containers) is C(true)
type: list
elements: str
sample: '[]'
containers_space_reclaimed:
description:
- Amount of reclaimed disk space from container pruning in bytes.
returned: I(containers) is C(true)
type: int
sample: '0'
# images
images:
description:
- List of IDs of deleted images.
returned: I(images) is C(true)
type: list
elements: str
sample: '[]'
images_space_reclaimed:
description:
- Amount of reclaimed disk space from image pruning in bytes.
returned: I(images) is C(true)
type: int
sample: '0'
# networks
networks:
description:
- List of IDs of deleted networks.
returned: I(networks) is C(true)
type: list
elements: str
sample: '[]'
# volumes
volumes:
description:
- List of IDs of deleted volumes.
returned: I(volumes) is C(true)
type: list
elements: str
sample: '[]'
volumes_space_reclaimed:
description:
- Amount of reclaimed disk space from volumes pruning in bytes.
returned: I(volumes) is C(true)
type: int
sample: '0'
# builder_cache
builder_cache_space_reclaimed:
description:
- Amount of reclaimed disk space from builder cache pruning in bytes.
returned: I(builder_cache) is C(true)
type: int
sample: '0'
'''
import traceback
try:
from docker.errors import DockerException
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from distutils.version import LooseVersion
from ansible_collections.community.general.plugins.module_utils.docker.common import (
AnsibleDockerClient,
RequestException,
)
try:
from ansible_collections.community.general.plugins.module_utils.docker.common import docker_version, clean_dict_booleans_for_docker_api
except Exception as dummy:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
def main():
    """Module entry point: prune the requested Docker object types.

    Reads the boolean switches and filter dicts from the module parameters,
    calls the corresponding Docker SDK prune operations, and exits with the
    deleted-object lists and reclaimed-space counters described in RETURN.
    """
    argument_spec = dict(
        containers=dict(type='bool', default=False),
        containers_filters=dict(type='dict'),
        images=dict(type='bool', default=False),
        images_filters=dict(type='dict'),
        networks=dict(type='bool', default=False),
        networks_filters=dict(type='dict'),
        volumes=dict(type='bool', default=False),
        volumes_filters=dict(type='dict'),
        builder_cache=dict(type='bool', default=False),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        # supports_check_mode=True,
        min_docker_api_version='1.25',
        min_docker_version='2.1.0',
    )

    # Version checks
    # prune_builds() only exists in Docker SDK for Python >= 3.3.0.
    cache_min_version = '3.3.0'
    if client.module.params['builder_cache'] and client.docker_py_version < LooseVersion(cache_min_version):
        msg = "Error: Docker SDK for Python's version is %s. Minimum version required for builds option is %s. Use `pip install --upgrade docker` to upgrade."
        client.fail(msg % (docker_version, cache_min_version))

    try:
        result = dict()

        if client.module.params['containers']:
            filters = clean_dict_booleans_for_docker_api(client.module.params.get('containers_filters'))
            res = client.prune_containers(filters=filters)
            # 'ContainersDeleted' is null when nothing was pruned; report [].
            result['containers'] = res.get('ContainersDeleted') or []
            result['containers_space_reclaimed'] = res['SpaceReclaimed']

        if client.module.params['images']:
            filters = clean_dict_booleans_for_docker_api(client.module.params.get('images_filters'))
            res = client.prune_images(filters=filters)
            result['images'] = res.get('ImagesDeleted') or []
            result['images_space_reclaimed'] = res['SpaceReclaimed']

        if client.module.params['networks']:
            filters = clean_dict_booleans_for_docker_api(client.module.params.get('networks_filters'))
            res = client.prune_networks(filters=filters)
            # Network pruning reclaims no disk space, so no SpaceReclaimed key.
            result['networks'] = res.get('NetworksDeleted') or []

        if client.module.params['volumes']:
            filters = clean_dict_booleans_for_docker_api(client.module.params.get('volumes_filters'))
            res = client.prune_volumes(filters=filters)
            result['volumes'] = res.get('VolumesDeleted') or []
            result['volumes_space_reclaimed'] = res['SpaceReclaimed']

        if client.module.params['builder_cache']:
            res = client.prune_builds()
            result['builder_cache_space_reclaimed'] = res['SpaceReclaimed']

        client.module.exit_json(**result)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3398983 | <filename>examples/doc-example-6.py
#!/usr/bin/python
"""Simple Hyperion client request demonstration."""
import asyncio
import logging
import sys
from hyperion import client
HOST = "hyperion"
PRIORITY = 20
async def instance_start_and_switch():
    """Start Hyperion instance 1, wait until it is running, then switch to it.

    Registers an instance-update callback that sets an event once instance 1
    reports running; aborts early (after logging) if any client call fails.
    """
    instance_ready = asyncio.Event()

    def instance_update(json):
        # BUGFIX: print() does not %-format its arguments the way logging
        # does; the original printed the literal "%s" followed by the dict.
        print("receive json %s" % (json,))
        for data in json["data"]:
            if data["instance"] == 1 and data["running"]:
                instance_ready.set()

    hc = client.HyperionClient(HOST, callbacks={"instance-update": instance_update})
    if not await hc.async_client_connect():
        logging.error("Could not connect to: %s", HOST)
        return

    if not client.ResponseOK(await hc.async_start_instance(instance=1)):
        logging.error("Could not start instance on: %s", HOST)
        return

    # Blocks waiting for the instance to start.
    await instance_ready.wait()

    if not client.ResponseOK(await hc.async_switch_instance(instance=1)):
        logging.error("Could not switch instance on: %s", HOST)
        return

    await hc.async_client_disconnect()
# Script entry point: verbose logging to stdout, then drive the coroutine
# to completion.  NOTE(review): asyncio.get_event_loop() is deprecated for
# this use since Python 3.10 — consider asyncio.run(...) if 3.7+ is assured.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
asyncio.get_event_loop().run_until_complete(instance_start_and_switch())
| StarcoderdataPython |
3210614 | import os
from os.path import dirname, realpath, isdir, join
# Generate README.md listing every sketch folder, skipping hidden folders
# and those named in hide.txt.
thisDir = dirname(realpath(__file__))
fns = os.listdir(thisDir)

# BUGFIX: resolve hide.txt (and README.md below) relative to this script
# rather than the current working directory, matching how the sketch
# folders themselves are resolved via thisDir.
with open(join(thisDir, 'hide.txt')) as f:
    ignore = [s.rstrip('\n') for s in f.readlines()]

sketches = []
for folder in fns:
    fp = join(thisDir, folder)
    if not isdir(fp) or folder in ignore or folder.startswith('.'):
        continue
    sketches.append(folder)

with open(join(thisDir, 'README.md'), 'w') as f:
    f.write('# List of sketches\n')
    # Case-insensitive alphabetical order for the listing.
    for folder in sorted(sketches, key=str.casefold):
        f.write('* [%s](https://fepegar.github.io/creative-coding/%s)\n' % (folder, folder))
| StarcoderdataPython |
11354575 | #!/usr/bin/env python
import django
from django.conf import settings
from django.core.management import call_command
# Minimal Django settings used only to run this package's test suite.
SECRET_KEY = 'fake-key'  # test-only value; never use in production
DEBUG = True

INSTALLED_APPS = (
    # Required contrib apps.
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sites',
    'django.contrib.sessions',
    'django.contrib.messages',
    # Our app and it's test app.
    'dbsettings',
    'tests',
)

SITE_ID= 1
ROOT_URLCONF = 'tests.test_urls'

# In-memory SQLite keeps the test database fast and disposable.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
        #'TEST': {
        #    'NAME': 'auto_tests',
        #}
    }
}

# Middleware needed by the admin/auth/messages apps above.
MIDDLEWARE = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
| StarcoderdataPython |
3405658 | <reponame>maltahan/DS28E38Upycraft
try:
import usocket as socket
except:
import socket
import binascii
import json
import ds28e38
import onewire
import _onewire as _ow
import network
import time
import os
import struct
# 32-byte all-zero challenge sent to the DS28E38 for ECDSA signing.
# NOTE(review): a fixed challenge defeats replay protection — presumably
# fine for a demo; confirm before production use.
challenge = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'

# Plain TCP server on port 80 serving the attestation data as JSON.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #create stream socket
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #Set the value of the given socket option
s.bind(('', 80)) #bind ip and port
s.listen(20)
def connect_to_ds28e38():
    """Serve DS28E38 attestation data over the module-level socket `s`.

    For each accepted TCP connection: query the DS28E38 authenticator over
    1-Wire (ROM/MAN IDs, ECDSA signature of the fixed challenge, public key
    and page data), assemble everything into one JSON object and send it back
    as an HTTP response.  Loops forever; one request per connection.
    """
    while True:
        conn, addr = s.accept() #Accept a connection,conn is a new socket object
        print("Got a connection from %s" % str(addr))
        # The HTTP request itself is read but never parsed — any request
        # yields the same attestation payload.
        request = conn.recv(1024) #Receive 1024 byte of data from the socket
        ds = ds28e38.DS28E38(onewire)
        print("the crc for the sequence of commands to get the signature is:")
        # 36-byte command sequence (0x66 0x22 0xA5 + 33 zero bytes); its
        # inverted CRC-16 is printed for debugging against the datasheet.
        data = bytearray(b'\x66\x22\xa5\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
        crc = ds.onewire_crc16(data)
        packed_crc = struct.pack('H', crc)
        inverted_crc = bytearray((b^0xFF for b in packed_crc))
        print(binascii.hexlify(inverted_crc))
        ds.readStatus()
        ManRomdata = json.loads(ds.read_man_rom_Id())
        print(ManRomdata)
        # Sign the fixed module-level challenge with the device's ECDSA key.
        signatureData = json.loads(ds.sign_message(challenge))
        print(signatureData)
        # Page 0 holds user data; pages 4/5 hold the public key coordinates
        # — TODO confirm page layout against the DS28E38 datasheet.
        pageData = binascii.hexlify(ds.read_page(0))
        publickey_x = binascii.hexlify(ds.read_page(4))
        publickey_y = binascii.hexlify(ds.read_page(5))
        # Merge identity, key, signature and challenge into one response.
        completeData = {}
        completeData['romId'] = ManRomdata['romId']
        completeData['manId'] = ManRomdata['manId']
        completeData['publicKey_x'] = publickey_x
        completeData['publicKey_y'] = publickey_y
        completeData['r'] = signatureData['r']
        completeData['s'] = signatureData['s']
        completeData['crc-16'] = signatureData['crc-16']
        completeData['page_data'] = pageData
        completeData['challenge'] = binascii.hexlify(challenge)
        signature_data_json = json.dumps(completeData)
        print(signature_data_json)
        # Hand-rolled HTTP response with permissive CORS for browser clients.
        conn.sendall('HTTP/1.1 200 OK\nConnection: close\nServer: FireBeetle\nContent-Type: application/json\nAccess-Control-Allow-Origin:*\nContent-length:{:d}\n\n'.format(len(signature_data_json)))
        conn.sendall(signature_data_json)
        conn.close() #close file
        print("Connection wth %s closed" % str(addr))
# Script entry point: run the request loop, closing the socket on failure.
try:
    # BUGFIX: the function is defined as connect_to_ds28e38 above; the
    # original called connectToDS28E38(), which raised NameError at once.
    connect_to_ds28e38()
except Exception as a:
    if (s):
        print('there is a problem with the socket')
        print(a)
        s.close()
| StarcoderdataPython |
9628084 | <reponame>NNTin/Dota-2-Meta-Analyzer
import requests
import time
from steamapi.steamapikey import SteamAPIKey
#from reddit.botinfo import message
message = False
def getMatchDetails(matchID, q=None):
    """Fetch match details for ``matchID`` from the Steam Dota 2 Web API.

    Retries with a linear back-off when the API returns an empty JSON body
    (a known Steam API quirk), giving up after 10 attempts.  The result dict
    -- possibly empty on failure -- is either returned directly, or put on
    the queue ``q`` when one is supplied (worker-process usage).
    """
    try:
        response = {}
        attempt = 0
        while response == {}:
            if message: print('[getmatchdetails] get match details')
            URL = "https://api.steampowered.com/IDOTA2Match_570/GetMatchDetails/V001/?key=" + SteamAPIKey + "&match_id=" + str(matchID)
            response = requests.get(URL)
            response.connection.close()
            response = response.json()
            # Steam API sometimes returns empty JSONs; retry until non-empty.
            if response == {}:
                attempt += 1
                if attempt == 10:
                    # BUG FIX: the original printed a literal '%s' because the
                    # format argument was missing.
                    print('Tried %s times, cancelling API request. (Skipped counter increases)' % attempt)
                    if q is None:
                        return response
                    q.put(response)
                    break
                # BUG FIX: the original message claimed a fixed 2 s wait while
                # the code actually sleeps attempt * 2 seconds.
                print('Failed API request, retrying in %s seconds' % (attempt * 2))
                print(URL)
                time.sleep(attempt * 2)
                continue
            if q is None:
                return response
            q.put(response)
    except Exception:
        # BUG FIX: narrowed the bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the best-effort skip behaviour is kept.
        print('[getmatchdetails] there was an error, match is skipped! %s' % matchID)
        response = {}
        if q is None:
            return response
        q.put(response)
| StarcoderdataPython |
4881347 | <reponame>suyash248/data_structures<filename>Tree/pathToTarget.py<gh_stars>1-10
from Tree.commons import insert, print_tree, is_leaf
# Time complexity: O(n)
def path_to_target_util_v1(root, target_key, path=None):
    """Post-order search for ``target_key`` in an arbitrary binary tree.

    Appends the found path to ``path`` (target first, root last) and
    returns True when the key exists.

    BUG FIX: the original used a mutable default argument (``path=[]``),
    which silently accumulated results across calls relying on the default.
    """
    if path is None:
        path = []
    if root is None:
        return False
    if root.key == target_key:
        path.append(root.key)
        return True
    lpath = path_to_target_util_v1(root.left, target_key, path)
    rpath = path_to_target_util_v1(root.right, target_key, path)
    if lpath or rpath:
        path.append(root.key)
    return lpath or rpath

def path_to_target_v1(root, target_key):
    """Return the root-to-target key path as a list (root first), or []
    when the key is absent.  Works on any binary tree in O(n)."""
    path = []
    path_to_target_util_v1(root, target_key, path)
    return path[::-1]
# Time complexity: O(log(n))
def path_to_target_v2(root, target_key, path=None):
    """BST-only search: walk from ``root`` toward ``target_key``, appending
    every visited key to ``path`` (root first).  Returns True when found;
    O(log n) on a balanced tree.

    BUG FIX: replaced the mutable default argument (``path=[]``) with None,
    so repeated calls relying on the default no longer share one list.
    """
    if path is None:
        path = []
    if root is None:
        return False
    path.append(root.key)
    if root.key == target_key:
        return True
    elif target_key < root.key:
        return path_to_target_v2(root.left, target_key, path)
    elif target_key > root.key:
        return path_to_target_v2(root.right, target_key, path)
# Module-level accumulator: collects the keys on the found path, target
# first, root last.  State persists across calls by design (the driver
# below reads it after calling path_to_target_v3).
path_v3 = []

def path_to_target_v3(root, target_key):
    """Search an arbitrary binary tree for ``target_key``.

    Appends the path (target first) to the module-level ``path_v3`` list
    and returns the current subtree root on success, None otherwise.
    """
    if root is None:
        return None
    if root.key == target_key:
        path_v3.append(root.key)
        return root
    # Try the left subtree first; only descend right if the left misses.
    for child in (root.left, root.right):
        if path_to_target_v3(child, target_key) is not None:
            path_v3.append(root.key)
            return root
    return None
# Driver program to test above function
if __name__ == "__main__":
    # Build the sample BST:
    #             50
    #          /      \
    #         30       70
    #        /  \     /  \
    #       20  40   60   80
    #      /  \
    #     15   25
    root = insert(None, 50)
    for key in (30, 20, 15, 25, 40, 70, 60, 80):
        insert(root, key)
    target = 40

    print ("\n----------------- USING V1 -----------------\n")
    found_path = path_to_target_v1(root, target)
    if found_path:
        print ("Path from root {root} to node {target} is - {path}"\
            .format(root=root.key, target=target, path=found_path))
    else:
        print ("Target key {target} not found".format(target=target))

    print ("\n----------------- USING V2 -----------------\n")
    visited = []
    if path_to_target_v2(root, target, visited):
        print ("Path from root {root} to node {target} is - {path}"\
            .format(root=root.key, target=target, path=visited))
    else:
        print ("Target key {target} not found".format(target=target))

    print ("\n----------------- USING V3 -----------------\n")
    path_to_target_v3(root, target)
    if path_v3:
        print("Path from root {root} to node {target} is - {path}" \
            .format(root=root.key, target=target, path=path_v3[::-1]))
    else:
        print ("Target key {target} not found".format(target=target))
239939 | <gh_stars>0
# Space: O(l)
# Time: O(m * n * l)
# Space: O(l)
# Time: O(m * n * l)
class Solution:
    def exist(self, board, word) -> bool:
        """Word-search: return True when ``word`` can be traced on ``board``
        through horizontally/vertically adjacent cells, each used once.

        Visited cells are temporarily marked '*' during the DFS; on success
        the marks from the winning path are left in place (same behaviour
        as the original implementation).
        """
        rows = len(board)
        cols = len(board[0])
        target_len = len(word)

        def search(cx, cy, depth):
            if depth >= target_len:
                return True
            if not (0 <= cx < cols and 0 <= cy < rows):
                return False
            if board[cy][cx] != word[depth]:
                return False
            saved = board[cy][cx]
            board[cy][cx] = '*'
            for nx, ny in ((cx - 1, cy), (cx + 1, cy), (cx, cy - 1), (cx, cy + 1)):
                if search(nx, ny, depth + 1):
                    return True
            board[cy][cx] = saved

        for y in range(rows):
            for x in range(cols):
                if search(x, y, 0):
                    return True
        return False
| StarcoderdataPython |
8006711 | <reponame>kienpt/site_discovery_public
"""
Perform the ranking of the candidate websites
with respect to seed websites
"""
import sys
sys.path.append("utils")
from urlutility import URLUtility
import heapq
from fetcher import Fetcher
from jaccard_similarity import Jaccard_Similarity
from cosine_similarity import Cosine_Similarity
from random_similarity import Random_Similarity
from bayesian_sets import Bayesian_Sets
from oneclass_svm import Oneclass_SVM
from pu_learning import PULearning
from classifier_ranker import ClassifierRanker
from stacking import StackingRanker
from time import time
class Ranker(object):
    """Scores and ranks candidate websites against a set of seed websites.

    The scoring backend is selected by name through the ``scoring``
    argument; an unrecognized name (including the default "similarity")
    falls back to cosine similarity.
    """
    def __init__(self, seeds, representation, scoring="similarity", neg_sites=None):
        # seeds: reference websites the candidates are compared against.
        # representation: feature/representation config forwarded to the scorer.
        # neg_sites: negative examples, used only by the PU-learning,
        #            classifier and stacking backends.
        self.seeds = seeds
        if scoring=="pu_learning":
            print "Use PU learning as scoring method"
            self.scorer = PULearning(self.seeds, representation, neg_sites)
        elif scoring=="jaccard":
            print "Use jaccard similarity as scoring method"
            self.scorer = Jaccard_Similarity(self.seeds, representation)
        elif scoring=="cosine":
            print "Use cosine similarity as scoring method"
            self.scorer = Cosine_Similarity(self.seeds, representation)
        elif scoring=="bayesian_bin":
            print "Use bayesian sets (binary representation) as scoring method"
            #representation['RANKING_EVALUATION']['BayesianSetType'] = 'binary'
            self.scorer = Bayesian_Sets(self.seeds, representation, value_type="binary")
        elif scoring=="bayesian_tfidf":
            print "Use bayesian sets (tfidf representation) as scoring method"
            #representation['RANKING_EVALUATION']['BayesianSetType'] = 'tfidf'
            self.scorer = Bayesian_Sets(self.seeds, representation, value_type="tfidf")
        elif scoring=="oneclass":
            print "Use One-Class SVM as scoring method"
            self.scorer = Oneclass_SVM(self.seeds, representation)
        elif scoring=="random":
            print "Use random as scoring method"
            self.scorer = Random_Similarity(self.seeds)
        elif scoring=="classifier":
            print "Use classifier as scoring method"
            self.scorer = ClassifierRanker(self.seeds, representation, neg_sites)
        elif scoring=='stacking':
            print "Use stacking as scoring method"
            self.scorer = StackingRanker(self.seeds, representation, neg_sites, value_type='avg')
        elif scoring=='stacking_rrf':
            print "Use stacking rrf as scoring method"
            self.scorer = StackingRanker(self.seeds, representation, neg_sites, value_type='rrf')
        else:
            # Fallback for any unrecognized scoring name.
            print "Scoring type is wrong. Use similarity as default"
            #self.scorer = Jaccard_Similarity(self.seeds)
            self.scorer = Cosine_Similarity(self.seeds, representation)
    def rank(self, candidates, prf=False):
        """
        Arguments:
            prf: Pseudo-Relevance Feedback -- when True, the top 10 ranked
                 candidates are fed back into the scorer as extra seeds and
                 the candidates are re-scored once.
        Return type:
            List of [website, score] sorted by score in descending order
        """
        scores = self.scorer.score(candidates) # scores = list of (website, score)
        scores.sort(reverse=True, key=lambda x:x[1])
        if prf:
            # Add the top k candidates to the seeds
            # Candidate vectorizations are cleared first so the scorer
            # re-vectorizes them against the enlarged seed set.
            print "Reset previous vectorization"
            for s in candidates:
                s.clear()
            print "Reranking"
            k = 10
            top_sites = [item[0] for item in scores[:k]]
            self.scorer.update_seeds(top_sites)
            scores = self.scorer.score(candidates) # scores = list of (website, score)
            scores.sort(reverse=True, key=lambda x:x[1])
        return scores
def test_ranking(seed_file, cand_file, data_dir):
    """Smoke test: fetch seed and candidate URLs, rank the candidates against
    the seeds, and print hosts/scores plus wall-clock timings per stage.

    NOTE(review): Ranker.__init__ requires a `representation` argument, so
    `Ranker(seeds)` below would raise TypeError as written -- confirm.
    """
    seed_urls = URLUtility.load_urls(seed_file)
    cand_urls = URLUtility.load_urls(cand_file)
    t = time()
    fetcher = Fetcher(data_dir)
    print "Time to initialize fetcher: ", time()-t
    t = time()
    seeds = fetcher.fetch(seed_urls)
    print "Time to fetch seeds: ", time()-t
    t = time()
    candidates = fetcher.fetch(cand_urls)
    print "Time to fetch candidates: ", time()-t
    t = time()
    ranker = Ranker(seeds)
    top_sites = ranker.rank(candidates)
    for site, score in top_sites:
        print site.host, score
    print "Time to rank: ", time()-t
if __name__=="__main__":
    # Usage: python <script> <seed_url_file> <candidate_url_file> <data_dir>
    seed_file = sys.argv[1]
    cand_file = sys.argv[2]
    data_dir = sys.argv[3]
    test_ranking(seed_file, cand_file, data_dir)
| StarcoderdataPython |
384749 | <reponame>DALME/dalme<filename>dalme_app/models/reference.py
from django.db import models
from dalme_app.models._templates import dalmeIntid, dalmeUuid
import django.db.models.options as options

# Extend the set of keywords Django accepts inside a model's Meta class so
# models may declare a custom 'in_db' option.
# NOTE(review): 'in_db' is not consumed in this module; presumably read by a
# database router elsewhere in the project -- confirm.
options.DEFAULT_NAMES = options.DEFAULT_NAMES + ('in_db',)
class AttributeReference(dalmeUuid):
    """Reference entry describing an attribute type (name, data type, source)."""
    name = models.CharField(max_length=255)
    short_name = models.CharField(max_length=55)
    description = models.TextField()
    data_type = models.CharField(max_length=15)
    source = models.CharField(max_length=255)
    # NOTE(review): blank=True with default=None but no null=True -- saving a
    # blank value would try to write NULL into a NOT NULL column; confirm
    # whether null=True was intended.
    term_type = models.CharField(max_length=55, blank=True, default=None)
class CountryReference(dalmeIntid):
    """Country reference entry: name plus alpha-2/alpha-3/numeric codes
    (ISO-3166-style fields)."""
    name = models.CharField(max_length=255, unique=True)
    alpha_3_code = models.CharField(max_length=3)
    alpha_2_code = models.CharField(max_length=2)
    num_code = models.IntegerField()
    class Meta:
        ordering = ["name"]
    def __str__(self):
        return self.name
    def get_url(self):
        # Hard-coded URL path; presumably mirrors the frontend route -- confirm.
        return '/countries/' + str(self.id)
class LanguageReference(dalmeIntid):
    """Language or dialect entry keyed by Glottolog and ISO 639-3 codes."""
    LANGUAGE = 1
    DIALECT = 2
    LANG_TYPES = (
        (LANGUAGE, 'Language'),
        (DIALECT, 'Dialect'),
    )
    glottocode = models.CharField(max_length=25, unique=True)
    iso6393 = models.CharField(max_length=25, unique=True, blank=True, null=True, default=None)
    name = models.CharField(max_length=255)
    type = models.IntegerField(choices=LANG_TYPES)
    # Self-referential FK: presumably dialects point at their parent language.
    parent = models.ForeignKey('self', on_delete=models.SET_NULL, null=True)
    class Meta:
        ordering = ["name"]
    def __str__(self):
        return self.name
    def get_url(self):
        return '/languages/' + str(self.id)
class LocaleReference(dalmeIntid):
    """Locale (place) entry with administrative region, country and
    optional coordinates; unique per (name, administrative_region)."""
    name = models.CharField(max_length=255)
    administrative_region = models.CharField(max_length=255)
    country = models.ForeignKey('CountryReference', on_delete=models.SET_NULL, null=True)
    latitude = models.DecimalField(max_digits=9, decimal_places=6, null=True)
    longitude = models.DecimalField(max_digits=9, decimal_places=6, null=True)
    class Meta:
        ordering = ['country', 'name']
        unique_together = ('name', 'administrative_region')
    def __str__(self):
        return f'{self.name}, {self.administrative_region}, {str(self.country)}'
    def get_url(self):
        return '/locales/' + str(self.id)
| StarcoderdataPython |
171200 | from stn import spatial_transformer_network as transformer
from tensorflow.keras import layers, Model
class STModel(object):
    """Spatial-transformer model: a small conv localization network predicts
    the 6 affine parameters that the STN layer applies to the input.

    The built Keras model is exposed as ``self.model``.
    """
    def __init__(self, input_shape):
        self.inpt = layers.Input(input_shape)
        self.output = self.transformer_net(self.inpt, self.localization_net(self.inpt))
        # BUG FIX: the original `return Model(...)` inside __init__ raises
        # TypeError (__init__ must return None); expose the model as an
        # attribute instead.
        self.model = Model(self.inpt, self.output)

    def localization_net(self, x):
        """Predict the 6 affine transform parameters from the input tensor."""
        x = layers.Conv2D(10, (2,2), padding='same')(x)
        x = layers.Activation('relu')(x)
        x = layers.MaxPool2D((2,2))(x)
        x = layers.Conv2D(20, (2,2), padding='same')(x)
        x = layers.Activation('relu')(x)
        x = layers.MaxPool2D((2,2))(x)
        x = layers.Conv2D(30, (2,2), padding='same')(x)
        x = layers.Activation('relu')(x)
        x = layers.MaxPool2D((2,2))(x)
        # BUG FIX: `tf.flatten` was an undefined name; use the Keras layer.
        x = layers.Flatten()(x)
        # BUG FIX: Dense(6, 25) passed 25 as the `activation` argument and
        # would crash; Dense takes a single `units` count (6 affine params).
        x = layers.Dense(6)(x)
        return x

    def transformer_net(self, x, theta):
        """Apply the spatial transformer with parameters ``theta`` to ``x``."""
        return transformer(x, theta)
1821160 | import random
import uuid
from typing import Any, Callable, List
from unittest.mock import Mock
import attr
from _pytest.monkeypatch import MonkeyPatch
from assertpy import assert_that
from mongomock import MongoClient
import shotgrid_leecher.repository.shotgrid_hierarchy_repo as repository
import shotgrid_leecher.utils.connectivity as conn
from shotgrid_leecher.domain import batch_domain as sut
from shotgrid_leecher.record.avalon_structures import AvalonProjectData
from shotgrid_leecher.record.commands import UpdateShotgridInAvalonCommand
from shotgrid_leecher.record.enums import ShotgridType
from shotgrid_leecher.record.intermediate_structures import (
IntermediateProject,
IntermediateParams,
IntermediateRow,
IntermediateAsset,
IntermediateTask,
IntermediateGroup,
IntermediateProjectConfig,
IntermediateProjectStep,
)
from shotgrid_leecher.record.results import BatchResult
from shotgrid_leecher.record.shotgrid_structures import ShotgridCredentials
from shotgrid_leecher.record.shotgrid_subtypes import (
FieldsMapping,
ProjectFieldsMapping,
AssetFieldsMapping,
ShotFieldsMapping,
TaskFieldsMapping,
StepFieldsMapping,
AssetToShotLinkMapping,
ShotToShotLinkMapping,
AssetToAssetLinkMapping,
)
from shotgrid_leecher.repository import intermediate_hierarchy_repo
from shotgrid_leecher.utils.ids import to_object_id
from shotgrid_leecher.writers import db_writer
TASK_NAMES = ["lines", "color", "look", "dev"]
STEP_NAMES = ["modeling", "shading", "rigging"]
def _fun(param: Any) -> Callable[[Any], Any]:
return lambda *_: param
def _params() -> IntermediateParams:
    """IntermediateParams populated from AvalonProjectData defaults, using
    only the attrs fields the two classes have in common."""
    shared_fields = (
        attr.fields_dict(IntermediateParams).keys()
        & attr.fields_dict(AvalonProjectData).keys()
    )
    defaults = AvalonProjectData().to_dict()
    return IntermediateParams(**{k: defaults[k] for k in shared_fields})
def _patch_adjacent(patcher: MonkeyPatch, client, hierarchy: List) -> None:
    """Patch the DB client getter and the hierarchy fetch to return the
    supplied canned values."""
    for target, attr_name, value in (
        (conn, "get_db_client", client),
        (repository, "get_hierarchy_by_project", hierarchy),
    ):
        patcher.setattr(target, attr_name, _fun(value))
def _default_fields_mapping() -> FieldsMapping:
    """FieldsMapping with every sub-mapping built from an empty dict
    (i.e. all defaults)."""
    mapping_types = (
        ProjectFieldsMapping,
        AssetFieldsMapping,
        ShotFieldsMapping,
        TaskFieldsMapping,
        StepFieldsMapping,
        AssetToShotLinkMapping,
        ShotToShotLinkMapping,
        AssetToAssetLinkMapping,
    )
    return FieldsMapping(*(mapping.from_dict({}) for mapping in mapping_types))
def _get_project() -> IntermediateProject:
    """Throwaway project row with a random 8-char name suffix and one
    config step per STEP_NAMES entry."""
    suffix = str(uuid.uuid4())[:8]
    project_name = f"Project_{suffix}"
    step_rows = []
    for step_name in STEP_NAMES:
        step_rows.append(IntermediateProjectStep(step_name, step_name[:1]))
    return IntermediateProject(
        id=project_name,
        code=project_name,
        src_id=111,
        params=_params(),
        config=IntermediateProjectConfig(steps=step_rows),
        object_id=to_object_id(111),
    )
def _get_asset_group(project: IntermediateProject) -> IntermediateGroup:
    """Top-level Asset group parented directly under *project*."""
    group_name = ShotgridType.ASSET.value
    return IntermediateGroup(
        id=group_name,
        parent=f",{project.id},",
        params=_params(),
        object_id=to_object_id(group_name),
    )

def _get_shot_group(project: IntermediateProject) -> IntermediateGroup:
    """Top-level generic group parented directly under *project*."""
    group_name = ShotgridType.GROUP.value
    return IntermediateGroup(
        id=group_name,
        parent=f",{project.id},",
        params=_params(),
        object_id=to_object_id(group_name),
    )
def _get_prp_assets(
    parent: IntermediateRow,
) -> List[IntermediateGroup]:
    """A 'PRP' group nested under *parent*, plus one asset ('Fork') inside it."""
    base_path = f"{parent.parent}{parent.id},"
    prp_group = IntermediateGroup(
        id="PRP",
        parent=base_path,
        params=_params(),
        object_id=to_object_id("PRP"),
    )
    fork_asset = IntermediateAsset(
        id="Fork",
        parent=f"{base_path}PRP,",
        src_id=uuid.uuid4().int,
        params=_params(),
        object_id=to_object_id("Fork"),
        linked_entities=[],
    )
    return [prp_group, fork_asset]
def _get_prp_asset_with_tasks(
    parent: IntermediateRow, task_num
) -> List[IntermediateTask]:
    """The PRP group/asset pair plus *task_num* randomly-named tasks
    attached to the asset."""
    rows = _get_prp_assets(parent)
    asset = rows[1]
    task_parent = f"{asset.parent}{asset.id},"
    for _ in range(task_num):
        rows.append(
            IntermediateTask(
                id=f"{random.choice(TASK_NAMES)}_{uuid.uuid4().int}",
                src_id=uuid.uuid4().int,
                task_type=random.choice(STEP_NAMES),
                parent=task_parent,
                params=_params(),
                object_id=to_object_id(uuid.uuid4().int),
                status=str(uuid.uuid4()),
                assigned_users=[],
            )
        )
    return rows
def test_shotgrid_to_avalon_batch_update_empty(monkeypatch: MonkeyPatch):
    """An empty Shotgrid hierarchy must write nothing into the avalon DB."""
    # Arrange
    client = MongoClient()
    _patch_adjacent(monkeypatch, client, [])
    command = UpdateShotgridInAvalonCommand(
        123,
        "",
        True,
        ShotgridCredentials("", "", ""),
        _default_fields_mapping(),
        AvalonProjectData(),
    )
    # Act
    sut.update_shotgrid_in_avalon(command)
    # Assert
    assert_that(client["avalon"].list_collection_names()).is_length(0)


def test_shotgrid_to_avalon_batch_update_project(monkeypatch: MonkeyPatch):
    """A project-only hierarchy upserts exactly one avalon row, keyed by the
    project's object_id."""
    # Arrange
    client = Mock()
    data = [_get_project()]
    upsert_mock = Mock(return_value=data[0].object_id)
    monkeypatch.setattr(conn, "get_db_client", _fun(client))
    monkeypatch.setattr(repository, "get_hierarchy_by_project", _fun(data))
    monkeypatch.setattr(
        intermediate_hierarchy_repo, "fetch_by_project", _fun(data)
    )
    monkeypatch.setattr(db_writer, "overwrite_intermediate", _fun(None))
    monkeypatch.setattr(db_writer, "upsert_avalon_rows", upsert_mock)
    command = UpdateShotgridInAvalonCommand(
        123,
        data[0].id,
        True,
        ShotgridCredentials("", "", ""),
        _default_fields_mapping(),
        AvalonProjectData(),
    )
    # Act
    sut.update_shotgrid_in_avalon(command)
    # Assert
    assert_that(upsert_mock.call_args).is_length(2)
    assert_that(upsert_mock.call_args_list).is_length(1)
    assert_that(upsert_mock.call_args_list[0][0][1][0]["_id"]).is_equal_to(
        data[0].object_id
    )
def test_shotgrid_to_avalon_batch_update_asset_value(monkeypatch: MonkeyPatch):
    """Project + asset-group + PRP assets: the avalon upsert receives four
    rows, with the project and asset-group object_ids first."""
    # Arrange
    client = MongoClient()
    project = _get_project()
    asset_grp = _get_asset_group(project)
    data = [project, asset_grp, *_get_prp_assets(asset_grp)]
    # last_batch_data = [attr.evolve(x, object_id=ObjectId()) for x in data[:2]]
    call_list = []
    def upsert_mock(project_name, rows):
        # Record the _id of every row handed to the upsert, in order.
        for x in rows:
            call_list.append(x["_id"])
    monkeypatch.setattr(conn, "get_db_client", _fun(client))
    monkeypatch.setattr(repository, "get_hierarchy_by_project", _fun(data))
    monkeypatch.setattr(
        intermediate_hierarchy_repo, "fetch_by_project", _fun(data)
    )
    monkeypatch.setattr(db_writer, "overwrite_intermediate", _fun(None))
    monkeypatch.setattr(db_writer, "upsert_avalon_rows", upsert_mock)
    command = UpdateShotgridInAvalonCommand(
        123,
        project.id,
        True,
        ShotgridCredentials("", "", ""),
        _default_fields_mapping(),
        AvalonProjectData(),
    )
    # Act
    sut.update_shotgrid_in_avalon(command)
    # Assert
    assert_that(call_list).is_length(4)
    assert_that(call_list[0]).is_equal_to(data[0].object_id)
    assert_that(call_list[1]).is_equal_to(data[1].object_id)


def test_shotgrid_to_avalon_batch_update_asset_hierarchy_db(
    monkeypatch: MonkeyPatch,
):
    """The intermediate hierarchy is overwritten exactly once with a list
    whose rows preserve the known object_ids (and assign new ones to rows
    that lacked them)."""
    # Arrange
    client = MongoClient()
    project = _get_project()
    asset_grp = _get_asset_group(project)
    data = [project, asset_grp, *_get_prp_assets(asset_grp)]
    def upsert_mock(_, row):
        return row["_id"]
    insert_intermediate = Mock()
    monkeypatch.setattr(conn, "get_db_client", _fun(client))
    monkeypatch.setattr(repository, "get_hierarchy_by_project", _fun(data))
    monkeypatch.setattr(
        intermediate_hierarchy_repo, "fetch_by_project", _fun(data)
    )
    monkeypatch.setattr(
        db_writer, "overwrite_intermediate", insert_intermediate
    )
    monkeypatch.setattr(db_writer, "upsert_avalon_row", upsert_mock)
    command = UpdateShotgridInAvalonCommand(
        123,
        project.id,
        True,
        ShotgridCredentials("", "", ""),
        _default_fields_mapping(),
        AvalonProjectData(),
    )
    # Act
    sut.update_shotgrid_in_avalon(command)
    # Assert
    assert_that(insert_intermediate.call_count).is_equal_to(1)
    assert_that(insert_intermediate.call_args_list[0][0][1]).is_type_of(list)
    assert_that(
        insert_intermediate.call_args_list[0][0][1][0].object_id
    ).is_equal_to(data[0].object_id)
    assert_that(
        insert_intermediate.call_args_list[0][0][1][1].object_id
    ).is_equal_to(data[1].object_id)
    assert_that(
        insert_intermediate.call_args_list[0][0][1][2].object_id
    ).is_not_none()
    assert_that(
        insert_intermediate.call_args_list[0][0][1][3].object_id
    ).is_not_none()
def test_shotgrid_to_avalon_batch_update_asset_with_tasks(
    monkeypatch: MonkeyPatch,
):
    """Tasks attached to an asset do not produce their own avalon rows:
    four rows are upserted regardless of the three tasks."""
    # Arrange
    client = MongoClient()
    project = _get_project()
    asset_grp = _get_asset_group(project)
    data = [project, asset_grp, *_get_prp_asset_with_tasks(asset_grp, 3)]
    call_list = []
    def upsert_mock(project_name, rows):
        # Record the _id of every row handed to the upsert, in order.
        for x in rows:
            call_list.append(x["_id"])
    monkeypatch.setattr(conn, "get_db_client", _fun(client))
    monkeypatch.setattr(repository, "get_hierarchy_by_project", _fun(data))
    monkeypatch.setattr(
        intermediate_hierarchy_repo, "fetch_by_project", _fun(data)
    )
    monkeypatch.setattr(db_writer, "overwrite_intermediate", _fun(None))
    monkeypatch.setattr(db_writer, "upsert_avalon_rows", upsert_mock)
    command = UpdateShotgridInAvalonCommand(
        123,
        project.id,
        True,
        ShotgridCredentials("", "", ""),
        _default_fields_mapping(),
        AvalonProjectData(),
    )
    # Act
    sut.update_shotgrid_in_avalon(command)
    # Assert
    assert_that(call_list).is_length(4)
    assert_that(call_list[0]).is_equal_to(data[0].object_id)
def test_shotgrid_to_avalon_batch_update_wrong_project_name(
    monkeypatch: MonkeyPatch,
):
    """A project name that does not match the fetched hierarchy yields
    BatchResult.WRONG_PROJECT_NAME (for either overwrite value)."""
    # Arrange
    client = MongoClient()
    data = [_get_project()]
    monkeypatch.setattr(conn, "get_db_client", _fun(client))
    monkeypatch.setattr(repository, "get_hierarchy_by_project", _fun(data))
    openpype_project_name = str(uuid.uuid4())[0:8]
    overwrite = bool(random.getrandbits(1))
    command = UpdateShotgridInAvalonCommand(
        123,
        openpype_project_name,
        overwrite,
        ShotgridCredentials("", "", ""),
        _default_fields_mapping(),
        AvalonProjectData(),
    )
    # Act
    res = sut.update_shotgrid_in_avalon(command)
    # Assert
    assert_that(res).is_equal_to(BatchResult.WRONG_PROJECT_NAME)
9602099 | <gh_stars>1-10
from tortoise import Tortoise
from app.config import config
async def init():
    """Initialise Tortoise ORM against the configured PostgreSQL database
    and create any missing tables."""
    # BUG FIX: the original had a stray trailing comma after the awaited
    # call, turning the statement into a throwaway 1-tuple expression.
    await Tortoise.init(
        {
            "connections": {
                "default": {
                    "engine": "tortoise.backends.asyncpg",
                    "credentials": {
                        "host": config.PG.HOST,
                        "port": config.PG.PORT,
                        "user": config.PG.USER,
                        "password": config.PG.PASSWORD,
                        "database": config.PG.DATABASE,
                    },
                }
            },
            # All models live in app.db.models and use the default connection.
            "apps": {"models": {"models": ["app.db.models"], "default_connection": "default"}},
        }
    )
    await Tortoise.generate_schemas()
| StarcoderdataPython |
9611746 | import requests
from bs4 import BeautifulSoup
# Build a Google search URL for a person's name and scrape the "profession"
# panel from the result page.
name = '<NAME>'
query = '+'.join(name.split())
search = 'https://www.google.co.uk/search?q=' + query
print(search)
result = requests.get(search)
if result.status_code == requests.codes.ok:
    soup = BeautifulSoup(result.content, 'lxml')
    print(len(soup))
    # NOTE(review): "_zdb _Pxg" is an obfuscated Google CSS class and is
    # likely stale -- verify against the live page markup.
    profession = soup.find_all("div", class_="_zdb _Pxg")
    # BUG FIX: guard against an empty result list (IndexError in the
    # original when the div is absent).
    if profession:
        print(profession[0].text)
| StarcoderdataPython |
1981672 | <filename>casbin_redis_watcher/options.py
import redis
import uuid
class WatcherOptions:
addr = None
sub_client = None
pub_client = None
channel = None
ignore_self = None
local_ID = None
optional_update_callback = None
def init_config(self):
if self.local_ID == "":
self.local_ID = uuid.uuid4()
if self.channel == "":
self.channel = "/casbin"
| StarcoderdataPython |
4833421 | <reponame>discovery-131794/django_local_library
from django.forms.forms import Form
from django.forms.models import ModelForm
from django.forms.fields import CharField, DateField
from .models import BookInstance
from datetime import date, timedelta
from django.core.exceptions import ValidationError
from .models import Author
class MarkForm(Form):
    """Form for renewing a BookInstance's due-back date.

    The renewal date must fall between today and three weeks from today.
    """
    # The instance id is displayed but not editable.
    id = CharField(label='ID', disabled=True)
    due_back = DateField(label='Renew_due_back', help_text='Input date between today and three weeks later.')

    def save(self, id):
        """Persist the validated renewal date onto the BookInstance.

        `id` shadows the builtin, but the name is part of the public
        interface so it is kept.  Assumes is_valid() has been called.
        """
        bookinstance = BookInstance.objects.get(id=id)
        # BUG FIX: store the parsed date from cleaned_data instead of the
        # raw POST string from self.data.
        bookinstance.due_back = self.cleaned_data['due_back']
        bookinstance.save()

    def clean_due_back(self):
        """Validate that the date lies within [today, today + 21 days]."""
        # BUG FIX: validate the already-parsed date object instead of
        # re-parsing the raw self.data value with date.fromisoformat, which
        # broke for any non-ISO input format the DateField accepts.
        due_back = self.cleaned_data['due_back']
        if due_back < date.today() or due_back > date.today() + timedelta(days=21):
            raise ValidationError('The date must be between today and three weeks later.')
        return due_back
class CreateAuthorModelForm(ModelForm):
    """ModelForm exposing every Author field for creation.
    NOTE(review): fields='__all__' exposes all model fields -- confirm no
    sensitive/auto-managed fields exist on Author."""
    class Meta:
        model = Author
        fields = '__all__'
class UpdateAuthorModelForm(ModelForm):
    """ModelForm exposing every Author field for editing."""
    class Meta:
        model = Author
        fields = '__all__'
202304 | from .taxable import TaxableTx
from .entry_config import CRYPTO, TRANSFERS_OUT
# Ledger-entry templates for an outbound transfer: debit the transfers-out
# account, credit the crypto account.
debit_base_entry = {'side': "debit", **TRANSFERS_OUT}
credit_quote_entry = {'side': "credit", **CRYPTO}
entry_template = {
    'debit': debit_base_entry,
    'credit': credit_quote_entry
}
class Send(TaxableTx):
    """A 'send' (outbound transfer) transaction."""
    def __init__(self, **kwargs) -> None:
        kwargs['type'] = 'send'
        # NOTE(review): .copy() is shallow -- the nested debit/credit dicts
        # are still shared between Send instances; confirm that is intended.
        super().__init__(entry_template=entry_template.copy(), **kwargs)
    def get_affected_balances(self):
        # Return {symbol: -quantity} for the sent asset and, when present,
        # the fee asset (both reduce the sender's balance).
        affected_balances = {}
        base = self.assets['base']
        affected_balances[base.symbol] = -base.quantity
        if 'fee' in self.assets:
            fee = self.assets['fee']
            affected_balances[fee.symbol] = -fee.quantity
        return affected_balances
324965 | from microbit import *
class MIDI():
    """Minimal MIDI-out helper for the micro:bit, writing raw MIDI bytes
    over UART on pin0 at 31250 baud (the standard MIDI rate)."""
    NOTE_ON = 0x90
    NOTE_OFF = 0x80
    CHAN_MSG = 0xB0
    CHAN_BANK = 0x00
    CHAN_VOLUME = 0x07
    CHAN_PROGRAM = 0xC0
    # NOTE: this runs at class-definition time -- the UART is reconfigured
    # as soon as this class body executes.
    uart.init(baudrate=31250, bits=8, parity=None, stop=1, tx=pin0)
    @staticmethod
    def send(b0, b1, b2=None):
        # Emit a 2- or 3-byte MIDI message.
        if b2 is None: m = bytes([b0,b1])
        else: m = bytes([b0,b1,b2])
        uart.write(m)
    def __init__(self, channel=0, velocity=0x7F):
        # channel: MIDI channel 0-15; velocity: default note velocity 0-127.
        self.channel = channel
        self.velocity = velocity
    def set_instrument(self, instrument):
        # Program change; `instrument` is 1-based as in GM patch lists.
        instrument -= 1
        if instrument<0 or instrument>0x7F: return
        self.send(self.CHAN_PROGRAM|self.channel, instrument)
    def note_on(self, note, velocity=None):
        # Out-of-range notes are ignored; None velocity falls back to the
        # instance default, out-of-range velocities are clamped to 0x7F.
        if note<0 or note>0x7F:return
        if velocity is None: velocity=self.velocity
        if velocity<0 or velocity>0x7F: velocity=0x7F
        self.send(self.NOTE_ON|self.channel, note, velocity)
    def note_off(self, note, velocity=0x7F):
        # NOTE(review): the default here is 0x7F (not None as in note_on),
        # so the `velocity is None` fallback below is dead code unless a
        # caller passes None explicitly -- confirm which default was meant.
        if note<0 or note>0x7F:return
        if velocity is None: velocity=self.velocity
        if velocity<0 or velocity>0x7F: velocity=0x7F
        self.send(self.NOTE_OFF|self.channel, note, velocity)
midi = MIDI()
def slide():
    """Endlessly sweep up through MIDI notes 20-89, sounding each for
    10 ms with a 10 ms gap."""
    while True:
        for note in range(20, 90):
            midi.note_on(note)
            sleep(10)
            midi.note_off(note)
            sleep(10)
def acc():
    """Play notes driven by the accelerometer's x axis: the tilt value,
    clamped to [-1000, 1000], maps linearly onto MIDI notes [20, 90]."""
    low_note = 20   # 0
    high_note = 90  # 127
    last_note = None
    while True:
        tilt = accelerometer.get_x()
        # Clamp to [-1000, 1000], then shift into [0, 2000].
        tilt = max(-1000, min(tilt, 1000)) + 1000
        note = int(low_note + tilt / (2000 / (high_note - low_note)))
        # Only retrigger when the mapped note actually changes.
        if note != last_note:
            if last_note is not None:
                midi.note_off(last_note)
            midi.note_on(note)
            last_note = note
acc()
| StarcoderdataPython |
9653966 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 24 18:44:10 2021
@author: s14761
"""
import numpy as np
#Fin t such that the equilibrium is in mixed strategies
def determine_tmax(al, ah, T, t, P):
    """Threshold transmission fee below which the equilibrium is in mixed
    strategies, together with the candidate lower bounds bl_hat / bh_hat.

    Returns (tmax, bl_hat, bh_hat).
    """
    tmax = P * (ah - T) * (al + T) / (T * ah - al * al)
    bl_hat = t * T / (al + T)
    bh_hat = (P * (ah - T) + t * al) / (al + ah)
    return tmax, bl_hat, bh_hat
def determine_area(al, ah):
    """Classify the demand realization: area 'B' (num 1) when the total
    demand al + ah is at most 60, otherwise area 'C' (num 0).

    Returns (area, area_num).
    """
    if al + ah <= 60:
        return 'B', 1
    return 'C', 0
def d_strategies_tt(al, ah, kl, kh, T, t, P, N):
    """Mixed-strategy CDFs under transmission tariffs ('tt').

    Builds a price grid p of N+1 points on [b, P] (b = max of the two
    lower bounds) and the CDFs Fh/Fl on that grid, with branch-specific
    quantities depending on the demand area (B or C).

    Returns (Fh, Fl, p, b1, b2).
    NOTE(review): formula derivations come from the underlying economic
    model and are not verifiable from this file alone.
    """
    # Determine in which area (B or C) the demand realization falls.
    area, area_num = determine_area(al, ah)
    p = np.zeros(N+1)
    Fh = np.zeros(N+1)
    Fl = np.zeros(N+1)
    #Area B:
    if area_num == 1:
        # Spot-market quantities
        q11s = al+ah
        q12s = 0
        q21s = 0
        q22s = al+ah
        # Redispatch quantities
        q21r=(ah-T)-q21s
        q22r=q22s-(al+T) #This value never enters in the d_strategies, since in the redispatch market, it is multiplied by 0
        # Transmission quantities
        q11t=al
        q12t=0
        q21t=0
        q22t=T
        # Lower bound of the price support and grid step
        b1 = ((P*(ah-T))+(t*q11t))/q11s
        b2 = (t*q22t)/q22s
        b = max(b1,b2)
        eps = (P-b)/N
        # Build the price grid and the CDFs (i == 0 guarded: p[0] == b
        # would make the formulas 0/0).
        for i in range(N+1):
            p[i]=b+eps*(i)
            if i == 0:
                Fl[i]=0
                Fh[i]=0
            else:
                Fl[i]=((p[i]-b)*q11s)/((p[i]*q11s)-(t*q11t)-(b*q21s)+(t*q21t)-(P*q21r))
                Fh[i]=((p[i]-b)*q22s)/((p[i]*q22s)-(t*q22t)-(b*q12s)+(t*q12t))
    #Area C:
    else:
        # Spot-market quantities (capacity-constrained)
        q11s = kh
        q12s = (al+ah-kh)
        q21s = (al+ah-kl)
        q22s = kl
        # Redispatch quantities
        q21r=(ah-T)-q21s
        q22r=q22s-(al+T) #This value never enters in the d_strategies, since in the redispatch market, it is multiplied by 0
        # Transmission quantities
        q11t=max(0,kh-ah)
        q12t=max(0,ah-kh)
        q21t=max(0,al-kl) #In fact, for the parameters of the model, q21t=0
        q22t=T
        # Lower bound of the price support and grid step
        b1 = ((P*(ah-T))+(t*q11t))/q11s
        b2 = ((P*q12s)+(t*q22t)-(t*q12t))/q22s
        b = max(b1,b2)
        eps = (P-b)/N
        # Build the price grid and the CDFs (area C uses p[i] rather than b
        # in the denominators -- presumably intentional; confirm).
        for i in range(N+1):
            p[i]=b+eps*(i)
            Fh[i]=((p[i]-b)*q22s)/((p[i]*q22s)-(t*q22t)-(p[i]*q12s)+(t*q12t))
            Fl[i]=((p[i]-b)*q11s)/((p[i]*q11s)-(t*q11t)-(p[i]*q21s)+(t*q21t)-(P*q21r))
    # Force the CDFs to close at 1 at the top of the support.
    Fh[N] = 1
    Fl[N] =1
    return Fh, Fl, p, b1, b2
def u_strategies_tt(al, ah, kl, kh, T, t, P, N):
    """Counterpart of d_strategies_tt (transmission tariffs).

    Area B mirrors d_strategies_tt exactly; in area C the CDFs are set to
    zero (and, unlike the 'd' variant, Fh[N]/Fl[N] are never forced to 1).
    NOTE(review): presumably the zero CDFs encode a degenerate/pure
    strategy in area C for this variant -- confirm against the model.

    Returns (Fh, Fl, p, b1, b2).
    """
    # Determine in which area (B or C) the demand realization falls.
    area, area_num = determine_area(al, ah)
    p = np.zeros(N+1)
    Fh = np.zeros(N+1)
    Fl = np.zeros(N+1)
    #Area B:
    if area_num == 1:
        # Spot-market quantities
        q11s = al+ah
        q12s = 0
        q21s = 0
        q22s = al+ah
        # Redispatch quantities
        q21r=(ah-T)-q21s
        q22r=q22s-(al+T) #This value never enters in the d_strategies, since in the redispatch market, it is multiplied by 0
        # Transmission quantities
        q11t=al
        q12t=0
        q21t=0
        q22t=T
        # Lower bound of the price support and grid step
        b1 = ((P*(ah-T))+(t*q11t))/q11s
        b2 = (t*q22t)/q22s
        b = max(b1,b2)
        eps = (P-b)/N
        # Build the price grid and the CDFs (i == 0 guarded against 0/0).
        for i in range(N+1):
            p[i]=b+eps*(i)
            if i == 0:
                Fl[i]=0
                Fh[i]=0
            else:
                Fl[i]=((p[i]-b)*q11s)/((p[i]*q11s)-(t*q11t)-(b*q21s)+(t*q21t)-(P*q21r))
                Fh[i]=((p[i]-b)*q22s)/((p[i]*q22s)-(t*q22t)-(b*q12s)+(t*q12t))
    else:
        # Spot-market quantities (capacity-constrained, area C)
        q11s = kh
        q12s = (al+ah-kh)
        q21s = (al+ah-kl)
        q22s = kl
        # Redispatch quantities
        q21r=(ah-T)-q21s
        q22r=q22s-(al+T) #This value never enters in the d_strategies, since in the redispatch market, it is multiplied by 0
        # Transmission quantities
        q11t=max(0,kh-ah)
        q12t=max(0,ah-kh)
        q21t=max(0,al-kl) #In fact, for the parameters of the model, q21t=0
        q22t=T
        # Lower bound of the price support and grid step
        b1 = ((P*(ah-T))+(t*q11t))/q11s
        b2 = ((P*q12s)+(t*q22t)-(t*q12t))/q22s
        b = max(b1,b2)
        eps = (P-b)/N
        # Price grid only; CDFs remain zero in area C for this variant.
        for i in range(N+1):
            p[i]=b+eps*(i)
            Fl[i]=0
            Fh[i]=0
    return Fh, Fl, p, b1, b2
def d_strategies_pc(al, ah, kl, kh, T, t, P, N):
    """Mixed-strategy CDFs under the 'pc' pricing regime (per-unit cost t
    applied directly to prices instead of transmission quantities).

    Same structure as d_strategies_tt but without transmission quantities;
    prices enter the formulas net of t.  Returns (Fh, Fl, p, b1, b2).
    NOTE(review): formula derivations come from the underlying economic
    model and are not verifiable from this file alone.
    """
    # Determine in which area (B or C) the demand realization falls.
    area, area_num = determine_area(al, ah)
    p = np.zeros(N+1)
    Fh = np.zeros(N+1)
    Fl = np.zeros(N+1)
    #Area B:
    if area_num == 1:
        # Spot-market quantities
        q11s = al+ah
        q12s = 0
        q21s = 0
        q22s = al+ah
        # Redispatch quantities
        q21r=(ah-T)-q21s
        q22r=q22s-(al+T) #This value never enters in the d_strategies, since in the redispatch market, it is multiplied by 0
        # Lower bound of the price support and grid step
        b1 = t+((P-t)*(ah-T)/q11s)
        b2 = (t*(al+T))/q22s
        b = max(b1,b2)
        eps = (P-b)/N
        # Build the price grid and the CDFs (i == 0 guarded against 0/0).
        for i in range(N+1):
            p[i]=b+eps*(i)
            if i == 0:
                Fl[i]=0
                Fh[i]=0
            else:
                Fl[i]=((p[i]-b)*q11s)/(((p[i]-t)*q11s)-((p[i]-t)*q21s)-((P-t)*q21r))
                Fh[i]=((p[i]-b)*q22s)/(((p[i]-t)*q22s)-((p[i]-t)*q12s))
    #Area C:
    else:
        # Spot-market quantities (capacity-constrained)
        q11s = kh
        q12s = (al+ah-kh)
        q21s = (al+ah-kl)
        q22s = kl
        # Redispatch quantities
        q21r=(ah-T)-q21s
        q22r=q22s-(al+T) #This value never enters in the d_strategies, since in the redispatch market, it is multiplied by 0
        # Lower bound of the price support and grid step
        b1 = t+((P-t)*(ah-T)/q11s)
        b2 = (((P-t)*q12s)+(t*(al+T)))/q22s
        b = max(b1,b2)
        eps = (P-b)/N
        # Build the price grid and the CDFs.
        for i in range(N+1):
            p[i]=b+eps*(i)
            Fh[i]=((p[i]-b)*q22s)/(((p[i]-t)*q22s)-((p[i]-t)*q12s))
            Fl[i]=((p[i]-b)*q11s)/(((p[i]-t)*q11s)-((p[i]-t)*q21s)-((P-t)*q21r))
    # Force the CDFs to close at 1 at the top of the support.
    Fh[N] = 1
    Fl[N] =1
    return Fh, Fl, p, b1, b2
def u_strategies_pc(al, ah, kl, kh, T, t, P, N):
    """Counterpart of d_strategies_pc ('pc' regime).

    Area B mirrors d_strategies_pc exactly; in area C the CDFs remain
    zero (and Fh[N]/Fl[N] are never forced to 1, matching u_strategies_tt).

    Returns (Fh, Fl, p, b1, b2).
    """
    # Determine in which area (B or C) the demand realization falls.
    area, area_num = determine_area(al, ah)
    p = np.zeros(N+1)
    Fh = np.zeros(N+1)
    Fl = np.zeros(N+1)
    #Area B:
    if area_num == 1:
        # Spot-market quantities
        q11s = al+ah
        q12s = 0
        q21s = 0
        q22s = al+ah
        # Redispatch quantities
        q21r=(ah-T)-q21s
        q22r=q22s-(al+T) #This value never enters in the d_strategies, since in the redispatch market, it is multiplied by 0
        # Lower bound of the price support and grid step
        b1 = t+((P-t)*(ah-T)/q11s)
        b2 = (t*(al+T))/q22s
        b = max(b1,b2)
        eps = (P-b)/N
        # Build the price grid and the CDFs (i == 0 guarded against 0/0).
        for i in range(N+1):
            p[i]=b+eps*(i)
            if i == 0:
                Fl[i]=0
                Fh[i]=0
            else:
                Fl[i]=((p[i]-b)*q11s)/(((p[i]-t)*q11s)-((p[i]-t)*q21s)-((P-t)*q21r))
                Fh[i]=((p[i]-b)*q22s)/(((p[i]-t)*q22s)-((p[i]-t)*q12s))
    #Area C:
    else:
        # Spot-market quantities (capacity-constrained)
        q11s = kh
        q12s = (al+ah-kh)
        q21s = (al+ah-kl)
        q22s = kl
        # Redispatch quantities
        q21r=(ah-T)-q21s
        q22r=q22s-(al+T) #This value never enters in the d_strategies, since in the redispatch market, it is multiplied by 0
        # Lower bound of the price support and grid step
        b1 = t+((P-t)*(ah-T)/q11s)
        b2 = (((P-t)*q12s)+(t*(al+T)))/q22s
        b = max(b1,b2)
        eps = (P-b)/N
        # Price grid only; CDFs remain zero in area C for this variant.
        for i in range(N+1):
            p[i]=b+eps*(i)
            Fl[i]=0
            Fh[i]=0
    return Fh, Fl, p, b1, b2
def d_welfare_tt(Fh, Fl, p, al, ah, T, t, kl, kh, P):
    """Welfare measures for the 'tt' regime given the mixed-strategy CDFs.

    Expected prices Eh/El are computed from the discrete CDF increments;
    consumer surplus, firm profits and aggregate welfare follow, with an
    'adjusted' consumer surplus that subtracts the redispatch term b*q22r
    per capita (floored at zero).

    Returns (Eh, El, E, CS_capita, CS_capita_adjusted, CS_aggregate,
    CS_aggregate_adjusted, pil, pih, pi_aggregate, welfare_aggregate,
    welfare_aggregate_adjusted).
    NOTE(review): economic interpretation taken from variable names; the
    derivations are not verifiable from this file alone.
    """
    area, area_num = determine_area(al, ah)
    # Expected prices: E[p] = sum p_i * (F(p_i) - F(p_{i-1})).
    Fh_diff = np.diff(Fh)
    Fl_diff = np.diff(Fl)
    Eh = sum (p[1:]*Fh_diff)
    El = sum (p[1:]*Fl_diff)
    # Demand-weighted average expected price and consumer surplus.
    E = (El*al/(al+ah))+(Eh*ah/(al+ah))
    CS_capita = P-E
    CS_aggregate = CS_capita*(al+ah)
    #Area B:
    if area_num == 1:
        # Spot-market quantities
        q11s = al+ah
        q12s = 0
        q21s = 0
        q22s = al+ah
        # Redispatch quantities
        q21r=(ah-T)-q21s
        q22r=q22s-(al+T) #This value never enters in the d_strategies, since in the redispatch market, it is multiplied by 0
        # Transmission quantities
        q11t=al
        q12t=0
        q21t=0
        q22t=T
        # Lower bound of the price support
        b1 = ((P*(ah-T))+(t*q11t))/q11s
        b2 = (t*q22t)/q22s
        b = max(b1,b2)
        # Firm profits at the lower bound b.
        pil = (b*q22s)-(t*q22t)
        pih = (b*q11s)-(t*q11t)
        pi_aggregate = pil+pih
        # Consumer surplus adjusted for the redispatch term.
        CS_capita_adjusted = CS_capita-(b*q22r/(al+ah))
        #CS_capita_adjusted cannot be negative:
        if CS_capita_adjusted < 0:
            CS_capita_adjusted = 0
        else:
            CS_capita_adjusted = CS_capita_adjusted
        CS_aggregate_adjusted = CS_capita_adjusted*(al+ah)
    else:
        # Spot-market quantities (capacity-constrained, area C)
        q11s = kh
        q12s = (al+ah-kh)
        q21s = (al+ah-kl)
        q22s = kl
        # Redispatch quantities
        q21r=(ah-T)-q21s
        q22r=q22s-(al+T) #This value never enters in the d_strategies, since in the redispatch market, it is multiplied by 0
        # Transmission quantities
        q11t=max(0,kh-ah)
        q12t=max(0,ah-kh)
        q21t=max(0,al-kl) #In fact, for the parameters of the model, q21t=0
        q22t=T
        # Lower bound of the price support
        b1 = ((P*(ah-T))+(t*q11t))/q11s
        b2 = ((P*q12s)+(t*q22t)-(t*q12t))/q22s
        b = max(b1,b2)
        # Firm profits at the lower bound b.
        pil = (b*q22s)-(t*q22t)
        pih = (b*q11s)-(t*q11t)
        pi_aggregate = pil+pih
        # Consumer surplus adjusted for the redispatch term.
        CS_capita_adjusted = CS_capita-(b*q22r/(al+ah))
        #CS_capita_adjusted cannot be negative:
        if CS_capita_adjusted < 0:
            CS_capita_adjusted = 0
        else:
            CS_capita_adjusted = CS_capita_adjusted
        CS_aggregate_adjusted = CS_capita_adjusted*(al+ah)
    welfare_aggregate = CS_aggregate + pi_aggregate
    welfare_aggregate_adjusted = CS_aggregate_adjusted + pi_aggregate
    return Eh, El, E, CS_capita, CS_capita_adjusted, CS_aggregate, CS_aggregate_adjusted, pil, pih, pi_aggregate, welfare_aggregate, welfare_aggregate_adjusted
def u_welfare_tt(Fh, Fl, p, al, ah, T, t, kl, kh, P):
    """Welfare under the uniform-price auction with a transmission tariff (tt).

    Fh, Fl -- mixed-strategy bid CDFs on the price grid p (length N+1)
    p      -- price grid matching Fh/Fl
    al, ah -- demand in the low-/high-demand area
    T, t   -- transmission capacity and transmission tariff
    kl, kh -- generation capacities; P -- price cap

    Returns expected prices (Eh, El, E), per-capita and aggregate consumer
    surplus (raw and redispatch-adjusted), per-firm profits (pil, pih),
    aggregate profits and aggregate welfare (raw and adjusted).
    """
    area, area_num = determine_area(al, ah)
    #Area B:
    if area_num == 1:
        #Spot quantities (capacity-unconstrained in area B)
        q11s = al+ah
        q12s = 0
        q21s = 0
        q22s = al+ah
        #Redispatch
        q21r=(ah-T)-q21s
        q22r=q22s-(al+T) #This value never enters in the d_strategies, since in the redispatch market, it is multiplied by 0
        #Transmission
        q11t=al
        q12t=0
        q21t=0
        q22t=T
        #CS: expected prices E[price] = sum_i p_i * (F(p_i) - F(p_{i-1}))
        Fh_diff = np.diff(Fh)
        Fl_diff = np.diff(Fl)
        Eh = sum (p[1:]*Fh_diff)
        El = sum (p[1:]*Fl_diff)
        E = (El*al/(al+ah))+(Eh*ah/(al+ah))
        CS_capita = P-E
        CS_aggregate = CS_capita*(al+ah)
        #Lower-bound candidates of the bid support
        b1 = ((P*(ah-T))+(t*q11t))/q11s
        b2 = (t*q22t)/q22s
        b = max(b1,b2)
        #Profits: spot revenue at the support lower bound minus transmission cost
        pil = (b*q22s)-(t*q22t)
        pih = (b*q11s)-(t*q11t)
        pi_aggregate = pil+pih
        #NOTE(review): unlike d_welfare_tt, this value is not floored at 0 here -- confirm intentional
        CS_capita_adjusted = CS_capita-(b*q22r/(al+ah))
        CS_aggregate_adjusted = CS_capita_adjusted*(al+ah)
    #Area C:
    else:
        #Spot quantities constrained by the capacities kl, kh
        q11s = kh
        q12s = (al+ah-kh)
        q21s = (al+ah-kl)
        q22s = kl
        #Redispatch
        q21r=(ah-T)-q21s
        q22r=q22s-(al+T) #This value never enters in the d_strategies, since in the redispatch market, it is multiplied by 0
        #Transmission
        q11t=max(0,kh-ah)
        q12t=max(0,ah-kh)
        q21t=max(0,al-kl) #In fact, for the parameters of the model, q21t=0
        q22t=T
        #CS: expected price equals the cap P, so consumer surplus is zero
        Eh = P
        El = P
        E = P
        CS_capita = 0
        CS_aggregate = 0
        CS_capita_adjusted = 0
        CS_aggregate_adjusted = 0
        #Profits
        #Profits equilibrium 1 (the equilibrium currently evaluated)
        pil = (P*q22s)-(t*q22t)
        pih = P*(ah-T)
        pi_aggregate = pil+pih
        #Profits equilibrium 2 (kept for reference; currently disabled)
        '''
        pil = (P*q12s)-(t*max(0,al-q11s))
        pih = (P*q11s)-(t*q11t)
        pi_aggregate = pil+pih
        '''
    welfare_aggregate = CS_aggregate + pi_aggregate
    welfare_aggregate_adjusted = CS_aggregate_adjusted + pi_aggregate
    return Eh, El, E, CS_capita, CS_capita_adjusted, CS_aggregate, CS_aggregate_adjusted, pil, pih, pi_aggregate, welfare_aggregate, welfare_aggregate_adjusted
def d_welfare_pc(Fh, Fl, p, al, ah, T, t, kl, kh, P):
    """Welfare under the discriminatory-price auction in the pc benchmark.

    Fh, Fl -- mixed-strategy bid CDFs on the price grid p (length N+1)
    p      -- price grid matching Fh/Fl
    al, ah -- demand in the low-/high-demand area
    T, t   -- transmission capacity and transmission tariff
    kl, kh -- generation capacities; P -- price cap

    Returns expected prices (Eh, El, E), per-capita and aggregate consumer
    surplus (raw and redispatch-adjusted), per-firm profits (pil, pih),
    aggregate profits and aggregate welfare (raw and adjusted).
    """
    area, area_num = determine_area(al, ah)
    # Expected prices: E[price] = sum_i p_i * (F(p_i) - F(p_{i-1})).
    Fh_diff = np.diff(Fh)
    Fl_diff = np.diff(Fl)
    Eh = sum(p[1:]*Fh_diff)
    El = sum(p[1:]*Fl_diff)
    E = (El*al/(al+ah))+(Eh*ah/(al+ah))
    CS_capita = P-E
    CS_aggregate = CS_capita*(al+ah)
    if area_num == 1:
        # Area B: spot quantities are unconstrained by capacity.
        q11s = al+ah
        q22s = al+ah
        b2 = (t*(al+T))/q22s
    else:
        # Area C: spot quantities constrained by the capacities kl, kh.
        q11s = kh
        q12s = (al+ah-kh)
        q22s = kl
        b2 = (((P-t)*q12s)+(t*(al+T)))/q22s
    # b1 has the same form in both areas (only q11s differs), so it is hoisted.
    b1 = t+((P-t)*(ah-T)/q11s)
    # Redispatch quantity (same expression in both areas).
    q22r = q22s-(al+T)
    b = max(b1, b2)
    # Profits: margin over the tariff at the support lower bound.
    pil = (b-t)*q22s
    pih = (b-t)*q11s
    pi_aggregate = pil+pih
    # CS adjusted for the redispatch payment b*q22r, floored at zero
    # (replaces the original redundant `if x < 0: x = 0 else: x = x` clamp).
    CS_capita_adjusted = max(0, CS_capita-(b*q22r/(al+ah)))
    CS_aggregate_adjusted = CS_capita_adjusted*(al+ah)
    welfare_aggregate = CS_aggregate + pi_aggregate
    welfare_aggregate_adjusted = CS_aggregate_adjusted + pi_aggregate
    return Eh, El, E, CS_capita, CS_capita_adjusted, CS_aggregate, CS_aggregate_adjusted, pil, pih, pi_aggregate, welfare_aggregate, welfare_aggregate_adjusted
def u_welfare_pc(Fh, Fl, p, al, ah, T, t, kl, kh, P):
    """Welfare under the uniform-price auction in the pc benchmark.

    Fh, Fl -- mixed-strategy bid CDFs on the price grid p (length N+1)
    p      -- price grid matching Fh/Fl
    al, ah -- demand in the low-/high-demand area
    T, t   -- transmission capacity and transmission tariff
    kl, kh -- generation capacities; P -- price cap

    Returns expected prices (Eh, El, E), per-capita and aggregate consumer
    surplus (raw and redispatch-adjusted), per-firm profits (pil, pih),
    aggregate profits and aggregate welfare (raw and adjusted).
    """
    area, area_num = determine_area(al, ah)
    #Area B:
    if area_num == 1:
        #Spot quantities (capacity-unconstrained in area B)
        q11s = al+ah
        q12s = 0
        q21s = 0
        q22s = al+ah
        #Redispatch
        q21r=(ah-T)-q21s
        q22r=q22s-(al+T) #This value never enters in the d_strategies, since in the redispatch market, it is multiplied by 0
        #CS: expected prices E[price] = sum_i p_i * (F(p_i) - F(p_{i-1}))
        Fh_diff = np.diff(Fh)
        Fl_diff = np.diff(Fl)
        Eh = sum (p[1:]*Fh_diff)
        El = sum (p[1:]*Fl_diff)
        E = (El*al/(al+ah))+(Eh*ah/(al+ah))
        CS_capita = P-E
        CS_aggregate = CS_capita*(al+ah)
        #Lower-bound candidates of the bid support
        b1 = t+((P-t)*(ah-T)/q11s)
        b2 = (t*(al+T))/q22s
        b = max(b1,b2)
        #Profits: margin over the tariff at the support lower bound
        pil = (b-t)*q22s
        pih = (b-t)*q11s
        pi_aggregate = pil+pih
        #NOTE(review): unlike d_welfare_pc, this value is not floored at 0 here -- confirm intentional
        CS_capita_adjusted = CS_capita-(b*q22r/(al+ah))
        CS_aggregate_adjusted = CS_capita_adjusted*(al+ah)
    #Area C:
    else:
        #Spot quantities constrained by the capacities kl, kh
        q11s = kh
        q12s = (al+ah-kh)
        q21s = (al+ah-kl)
        q22s = kl
        #Redispatch
        q21r=(ah-T)-q21s
        q22r=q22s-(al+T) #This value never enters in the d_strategies, since in the redispatch market, it is multiplied by 0
        #CS: consumer surplus is zero at the cap
        #NOTE(review): Eh and El are set to 0 here but to P in u_welfare_tt -- confirm which convention is intended
        Eh = 0
        El = 0
        E = P
        CS_capita = 0
        CS_aggregate = 0
        CS_capita_adjusted = 0
        CS_aggregate_adjusted = 0
        #Profits
        #Profits equilibrium 1 (the equilibrium currently evaluated)
        pil = (P-t)*q22s
        pih = (P-t)*(ah-T)
        pi_aggregate = pil+pih
        #Profits equilibrium 2 (kept for reference; currently disabled)
        '''
        pil = (P-t)*q12s
        pih = (P-t)*q11s
        pi_aggregate = pil+pih
        '''
    welfare_aggregate = CS_aggregate + pi_aggregate
    welfare_aggregate_adjusted = CS_aggregate_adjusted + pi_aggregate
    return Eh, El, E, CS_capita, CS_capita_adjusted, CS_aggregate, CS_aggregate_adjusted, pil, pih, pi_aggregate, welfare_aggregate, welfare_aggregate_adjusted
def d_simulate_model(al, ah, kl, kh, T, t, P, N, model, auction):
    """Run one model configuration and return its welfare outcomes.

    model   -- 'tt' (transmission tariff) or anything else for 'pc'
    auction -- 'discriminatory' or anything else for the uniform-price auction

    Returns the 12-tuple produced by the selected *_welfare_* function:
    (Eh, El, E, CS_capita, CS_capita_adjusted, CS_aggregate,
     CS_aggregate_adjusted, pil, pih, pi_aggregate, welfare_aggregate,
     welfare_aggregate_adjusted).
    """
    # Select the equilibrium-strategy and welfare evaluators for this case.
    # NOTE(review): as in the original code, the uniform-price branch also
    # uses the d_strategies_* CDFs -- confirm u_strategies_* was not intended.
    strategies = d_strategies_tt if model == 'tt' else d_strategies_pc
    if auction == 'discriminatory':
        welfare = u_welfare_tt if False else (d_welfare_tt if model == 'tt' else d_welfare_pc)
    else:
        welfare = u_welfare_tt if model == 'tt' else u_welfare_pc
    Fh, Fl, p, _b1, _b2 = strategies(al, ah, kl, kh, T, t, P, N)
    return welfare(Fh, Fl, p, al, ah, T, t, kl, kh, P)
def d_plot_welfare_tt(al, kl, kh, T, t, P, N, N2):
    """Sweep ah over np.linspace(41, 99, N2) and collect, for each value, the
    welfare outcomes of the discriminatory-price auction under the tt model.

    Returns the ah grid followed by twelve lists (one per component returned
    by d_welfare_tt), each of length N2.
    """
    ah_lst = np.linspace(41, 99, N2)
    outcomes = []
    for ah in ah_lst:
        Fh, Fl, p, _b1, _b2 = d_strategies_tt(al, ah, kl, kh, T, t, P, N)
        outcomes.append(d_welfare_tt(Fh, Fl, p, al, ah, T, t, kl, kh, P))
    # Transpose the per-ah result tuples into one list per welfare component.
    if outcomes:
        cols = [list(component) for component in zip(*outcomes)]
    else:
        cols = [[] for _ in range(12)]
    return tuple([ah_lst] + cols)
def u_plot_welfare_tt(al, kl, kh, T, t, P, N, N2):
    """Sweep ah over np.linspace(41, 99, N2) and collect, for each value, the
    welfare outcomes of the uniform-price auction under the tt model.

    Returns the ah grid followed by twelve lists (one per component returned
    by u_welfare_tt), each of length N2.
    """
    ah_lst = np.linspace(41, 99, N2)
    outcomes = []
    for ah in ah_lst:
        Fh, Fl, p, _b1, _b2 = u_strategies_tt(al, ah, kl, kh, T, t, P, N)
        outcomes.append(u_welfare_tt(Fh, Fl, p, al, ah, T, t, kl, kh, P))
    # Transpose the per-ah result tuples into one list per welfare component.
    if outcomes:
        cols = [list(component) for component in zip(*outcomes)]
    else:
        cols = [[] for _ in range(12)]
    return tuple([ah_lst] + cols)
def d_plot_welfare_pc(al, kl, kh, T, t, P, N, N2):
    """Sweep ah over np.linspace(41, 99, N2) and collect, for each value, the
    welfare outcomes of the discriminatory-price auction under the pc model.

    Returns the ah grid followed by twelve lists (one per component returned
    by d_welfare_pc), each of length N2.
    """
    ah_lst = np.linspace(41, 99, N2)
    outcomes = []
    for ah in ah_lst:
        Fh, Fl, p, _b1, _b2 = d_strategies_pc(al, ah, kl, kh, T, t, P, N)
        outcomes.append(d_welfare_pc(Fh, Fl, p, al, ah, T, t, kl, kh, P))
    # Transpose the per-ah result tuples into one list per welfare component.
    if outcomes:
        cols = [list(component) for component in zip(*outcomes)]
    else:
        cols = [[] for _ in range(12)]
    return tuple([ah_lst] + cols)
def u_plot_welfare_pc(al, kl, kh, T, t, P, N, N2):
    """Sweep ah over np.linspace(41, 99, N2) and collect, for each value, the
    welfare outcomes of the uniform-price auction under the pc model.

    Returns the ah grid followed by twelve lists (one per component returned
    by u_welfare_pc), each of length N2.
    """
    ah_lst = np.linspace(41, 99, N2)
    outcomes = []
    for ah in ah_lst:
        Fh, Fl, p, _b1, _b2 = u_strategies_pc(al, ah, kl, kh, T, t, P, N)
        outcomes.append(u_welfare_pc(Fh, Fl, p, al, ah, T, t, kl, kh, P))
    # Transpose the per-ah result tuples into one list per welfare component.
    if outcomes:
        cols = [list(component) for component in zip(*outcomes)]
    else:
        cols = [[] for _ in range(12)]
    return tuple([ah_lst] + cols)
if __name__=='__main__':
#Colors
'''
colors = {
"charcoal": "#264653ff",
"persian-green": "#2a9d8fff",
"orange-yellow-crayola": "#e9c46aff",
"sandy-brown": "#f4a261ff",
"burnt-sienna": "#e76f51ff"}
'''
colors = {
"c": "#264653ff",
"p-g": "#2a9d8fff",
"o-y-c": "#e9c46aff",
"s-b": "#f4a261ff",
"b-s": "#e76f51ff"}
#Parameters
al = 15
ah = 50.5
kl = 60
kh = 60
T = 40
t = 1.25
P = 7
N = 100
N2 = 400
tmax, bl_hat, bh_hat = determine_tmax(al, ah, T, t, P)
####################################
### Discriminatory price auction ###
####################################
##Call the functions
#Determine the area
area, area_num = determine_area(al, ah)
#d_strategies_tt
Fh_tt, Fl_tt, p_tt, b1_tt, b2_tt = d_strategies_tt(al, ah, kl, kh, T, t, P, N)
Eh_tt, El_tt, E_tt, CS_capita, CS_capita_adjusted, CS_aggregate, CS_aggregate_adjusted, pil, pih, pi_aggregate, welfare_aggregate, welfare_aggregate_adjusted=d_welfare_tt(Fh_tt, Fl_tt, p_tt, al, ah, T, t, kl, kh, P)
#d_strategies_pc
Fh_pc, Fl_pc, p_pc, b1_pc, b2_pc = d_strategies_pc(al, ah, kl, kh, T, t, P, N)
Eh_pc, El_pc, E_pc, CS_capita, CS_capita_adjusted, CS_aggregate, CS_aggregate_adjusted, pil, pih, pi_aggregate, welfare_aggregate, welfare_aggregate_adjusted=d_welfare_pc(Fh_pc, Fl_pc, p_pc, al, ah, T, t, kl, kh, P)
#Welfare tt
ah_lst_tt, Eh_lst_tt, El_lst_tt, E_lst_tt, CS_capita_lst_tt, CS_capita_adjusted_lst_tt, CS_aggregate_lst_tt, CS_aggregate_adjusted_lst_tt, pil_lst_tt, pih_lst_tt, pi_aggregate_lst_tt, welfare_aggregate_lst_tt, welfare_aggregate_adjusted_lst_tt=d_plot_welfare_tt(al, kl, kh, T, t, P, N, N2)
#Welfare pc
ah_lst_pc, Eh_lst_pc, El_lst_pc, E_lst_pc, CS_capita_lst_pc, CS_capita_adjusted_lst_pc, CS_aggregate_lst_pc, CS_aggregate_adjusted_lst_pc, pil_lst_pc, pih_lst_pc, pi_aggregate_lst_pc, welfare_aggregate_lst_pc, welfare_aggregate_adjusted_lst_pc=d_plot_welfare_pc(al, kl, kh, T, t, P, N, N2)
#Compare welfare
ah_lst_tt, Eh_lst_tt, El_lst_tt, E_lst_tt, CS_capita_lst_tt, CS_capita_adjusted_lst_tt, CS_aggregate_lst_tt, CS_aggregate_adjusted_lst_tt, pil_lst_tt, pih_lst_tt, pi_aggregate_lst_tt, welfare_aggregate_lst_tt, welfare_aggregate_adjusted_lst_tt=d_plot_welfare_tt(al, kl, kh, T, t, P, N, N2)
ah_lst_pc, Eh_lst_pc, El_lst_pc, E_lst_pc, CS_capita_lst_pc, CS_capita_adjusted_lst_pc, CS_aggregate_lst_pc, CS_aggregate_adjusted_lst_pc, pil_lst_pc, pih_lst_pc, pi_aggregate_lst_pc, welfare_aggregate_lst_pc, welfare_aggregate_adjusted_lst_pc=d_plot_welfare_pc(al, kl, kh, T, t, P, N, N2)
###############################################
### Discriminatory price auction: tt vs. pc ###
###############################################
##Plot the functions
import matplotlib.pyplot as plt
#d_profits comparison
fig, ax = plt.subplots()
ax.plot(ah_lst_tt, pih_lst_tt, label = 'pih_tt', color = colors["c"])
ax.plot(ah_lst_tt, pil_lst_tt, label = 'pil_tt', color = colors["p-g"])
ax.plot(ah_lst_pc, pih_lst_pc, label = 'Fh_pc', color = colors["c"], alpha=0.2)
ax.plot(ah_lst_pc, pil_lst_pc, label = 'Fl_pc', color = colors["p-g"], alpha=0.2)
#d_strategies_tt
fig, ax = plt.subplots()
ax.plot(p_tt, Fh_tt, label = 'Fh_tt', color = colors["c"])
ax.plot(p_tt, Fl_tt, label = 'Fl_tt', color = colors["p-g"])
ax.plot([Eh_tt,Eh_tt], [0,1], label = 'Eh_tt', color = colors["c"])
ax.plot([El_tt,El_tt], [0,1], label = 'El_tt', color = colors["p-g"])
ax.plot([E_tt,E_tt], [0,1], label = 'E_tt', color = colors["o-y-c"])
#d_strategies_pc
fig, ax = plt.subplots()
ax.plot(p_pc, Fh_pc, label = 'Fh_pc', color = colors["c"], alpha=1)
ax.plot(p_pc, Fl_pc, label = 'Fl_pc', color = colors["p-g"], alpha=1)
ax.plot([Eh_pc,Eh_pc], [0,1], label = 'Eh_pc', color = colors["c"], alpha=1)
ax.plot([El_pc,El_pc], [0,1], label = 'El_pc', color = colors["p-g"], alpha=1)
ax.plot([E_pc,E_pc], [0,1], label = 'E_pc', color = colors["o-y-c"], alpha=1)
#d_strategies comparison
fig, ax = plt.subplots()
ax.plot(p_tt, Fh_tt, label = 'Fh_tt', color = colors["c"])
ax.plot(p_tt, Fl_tt, label = 'Fl_tt', color = colors["p-g"])
ax.plot([Eh_tt,Eh_tt], [0,1], label = 'Eh_tt', color = colors["c"])
ax.plot([El_tt,El_tt], [0,1], label = 'El_tt', color = colors["p-g"])
ax.plot([E_tt,E_tt], [0,1], label = 'E_tt', color = colors["o-y-c"])
ax.plot(p_pc, Fh_pc, label = 'Fh_pc', color = colors["c"], alpha=0.2)
ax.plot(p_pc, Fl_pc, label = 'Fl_pc', color = colors["p-g"], alpha=0.2)
ax.plot([Eh_pc,Eh_pc], [0,1], label = 'Eh_pc', color = colors["c"], alpha=0.2)
ax.plot([El_pc,El_pc], [0,1], label = 'El_pc', color = colors["p-g"], alpha=0.2)
ax.plot([E_pc,E_pc], [0,1], label = 'E_pc', color = colors["o-y-c"], alpha=0.2)
#d_strategies paper
fig, ax = plt.subplots(ncols = 3, figsize = (20, 9))
#Axes1. tt
ax[0].plot(p_tt, Fh_tt, label = 'Fh_tt', color = colors["c"])
ax[0].plot(p_tt, Fl_tt, label = 'Fl_tt', color = colors["p-g"])
ax[0].plot([Eh_tt,Eh_tt], [0,1], label = 'Eh_tt', color = colors["c"])
ax[0].plot([El_tt,El_tt], [0,1], label = 'El_tt', color = colors["p-g"])
ax[0].plot([E_tt,E_tt], [0,1], label = 'E_tt', color = colors["o-y-c"])
ax[0].text(4.55, 0.8, "$F_h^{tt}(b)$", fontsize=18)
ax[0].text(4.55, 0.95, "$F_l^{tt}(b)$", fontsize=18)
ax[0].text(1.25, 1.01, "$E_l^{tt}$", fontsize=18)
ax[0].text(2.2, 1.01, "$E^{tt}$", fontsize=18)
ax[0].text(2.9, 1.01, "$E_h^{tt}$", fontsize=18)
ax[0].set_ylim(0, 1.1)
ax[0].set(xticks=[0, b1_tt, P], xticklabels=['0', '${b}^{tt}$', 'P'],
yticks=[0, 1], yticklabels=['0', '1'])
ax[0].set_ylabel('$\\theta_h$', fontsize=18)
ax[0].set_xlabel('$\\theta_l$', fontsize=18)
ax[0].set_title('strategies tt', fontsize=20)
#Axes2. tt vs. pc
ax[1].plot(p_tt, Fh_tt, label = 'Fh_tt', color = colors["c"])
ax[1].plot(p_tt, Fl_tt, label = 'Fl_tt', color = colors["p-g"])
ax[1].plot([Eh_tt,Eh_tt], [0,1], label = 'Eh_tt', color = colors["c"])
ax[1].plot([El_tt,El_tt], [0,1], label = 'El_tt', color = colors["p-g"])
ax[1].plot([E_tt,E_tt], [0,1], label = 'E_tt', color = colors["o-y-c"])
ax[1].text(4.55, 0.8, "$F_h^{tt}(b)$", fontsize=18)
ax[1].text(4.55, 0.95, "$F_l^{tt}(b)$", fontsize=18)
ax[1].text(1.25, 1.01, "$E_l^{tt}$", fontsize=18)
ax[1].text(2.2, 1.01, "$E^{tt}$", fontsize=18)
ax[1].text(2.9, 1.01, "$E_h^{tt}$", fontsize=18)
ax[1].plot(p_pc, Fh_pc, label = 'Fh_pc', color = colors["c"], alpha=0.3)
ax[1].plot(p_pc, Fl_pc, label = 'Fl_pc', color = colors["p-g"], alpha=0.3)
ax[1].plot([Eh_pc,Eh_pc], [0,1], label = 'Eh_pc', color = colors["c"], alpha=0.3)
ax[1].plot([El_pc,El_pc], [0,1], label = 'El_pc', color = colors["p-g"], alpha=0.3)
ax[1].plot([E_pc,E_pc], [0,1], label = 'E_pc', color = colors["o-y-c"], alpha=0.3)
ax[1].set_ylim(0, 1.1)
ax[1].set(xticks=[0, b1_tt, b1_pc, P], xticklabels=['0', '${b}^{tt}$', '${b}^{pc}$', 'P'],
yticks=[0, 1], yticklabels=['0', '1'])
ax[1].set_ylabel('$\\theta_h$', fontsize=18)
ax[1].set_xlabel('$\\theta_l$', fontsize=18)
ax[1].set_title('strategies tt vs. pc', fontsize=20)
#Axes3. pc
ax[2].plot(p_pc, Fh_pc, label = 'Fh_pc', color = colors["c"], alpha=0.3)
ax[2].plot(p_pc, Fl_pc, label = 'Fl_pc', color = colors["p-g"], alpha=0.3)
ax[2].plot([Eh_pc,Eh_pc], [0,1], label = 'Eh_pc', color = colors["c"], alpha=0.3)
ax[2].plot([El_pc,El_pc], [0,1], label = 'El_pc', color = colors["p-g"], alpha=0.3)
ax[2].plot([E_pc,E_pc], [0,1], label = 'E_pc', color = colors["o-y-c"], alpha=0.3)
ax[2].text(4.55+1.2, 0.75, "$F_h^{pc}(b)$", fontsize=18, alpha=1)
ax[2].text(4.55+1.2, 1.01, "$F_l^{pc}(b)$", fontsize=18, alpha=1)
ax[2].text(1.25+1.2, 1.01, "$E_l^{pc}$", fontsize=18, alpha=1)
ax[2].text(2.2+1.2, 1.01, "$E^{pc}$", fontsize=18, alpha=1)
ax[2].text(2.9+1.2, 1.01, "$E_h^{pc}$", fontsize=18, alpha=1)
ax[2].set_ylim(0, 1.1)
ax[2].set(xticks=[0, b1_pc, P], xticklabels=['0', '${b}^{pc}$', 'P'],
yticks=[0, 1], yticklabels=['0', '1'])
ax[2].set_ylabel('$\\theta_h$', fontsize=18)
ax[2].set_xlabel('$\\theta_l$', fontsize=18)
ax[2].set_title('strategies pc', fontsize=20)
plt.show()
#Welfare tt
fig, ax = plt.subplots()
ax.plot(ah_lst_tt, Eh_lst_tt, label = 'Eh_lst_tt', color = colors["c"], alpha=1)
ax.plot(ah_lst_tt, El_lst_tt, label = 'El_lst_tt', color = colors["p-g"], alpha=1)
ax.plot(ah_lst_tt, E_lst_tt, label = 'E_lst_tt', color = colors["o-y-c"], alpha=1)
fig, ax = plt.subplots()
ax.plot(ah_lst_tt, CS_aggregate_adjusted_lst_tt, label = 'CS_aggregate_adjusted_lst_tt', color = colors["b-s"], alpha=1)
ax.plot(ah_lst_tt, CS_aggregate_lst_tt, label = 'CS_aggregate_lst_tt', color = colors["c"], alpha=1)
fig, ax = plt.subplots()
ax.plot(ah_lst_tt, pil_lst_tt, label = 'pil_lst_tt', color = colors["c"], alpha=1)
ax.plot(ah_lst_tt, pih_lst_tt, label = 'pih_lst_tt', color = colors["p-g"], alpha=1)
fig, ax = plt.subplots()
ax.plot(ah_lst_tt, welfare_aggregate_adjusted_lst_tt, label = 'CS_aggregate_adjusted_lst_tt', color = colors["b-s"], alpha=1)
ax.plot(ah_lst_tt, welfare_aggregate_lst_tt, label = 'CS_aggregate_lst_tt', color = colors["c"], alpha=1)
#Welfare pc
fig, ax = plt.subplots()
ax.plot(ah_lst_pc, Eh_lst_pc, label = 'Eh_lst_pc', color = colors["c"], alpha=0.2)
ax.plot(ah_lst_pc, El_lst_pc, label = 'El_lst_pc', color = colors["p-g"], alpha=0.2)
ax.plot(ah_lst_pc, E_lst_pc, label = 'E_lst_pc', color = colors["o-y-c"], alpha=0.2)
fig, ax = plt.subplots()
ax.plot(ah_lst_pc, CS_aggregate_adjusted_lst_pc, label = 'CS_aggregate_adjusted_lst_pc', color = colors["b-s"], alpha=0.2)
ax.plot(ah_lst_pc, CS_aggregate_lst_pc, label = 'CS_aggregate_lst_pc', color = colors["c"], alpha=0.2)
fig, ax = plt.subplots()
ax.plot(ah_lst_pc, pil_lst_pc, label = 'pil_lst_pc', color = colors["c"], alpha=0.2)
ax.plot(ah_lst_pc, pih_lst_pc, label = 'pih_lst_pc', color = colors["p-g"], alpha=0.2)
fig, ax = plt.subplots()
ax.plot(ah_lst_pc, welfare_aggregate_adjusted_lst_pc, label = 'CS_aggregate_adjusted_lst_pc', color = colors["b-s"], alpha=0.2)
ax.plot(ah_lst_pc, welfare_aggregate_lst_pc, label = 'CS_aggregate_lst_pc', color = colors["c"], alpha=0.2)
#Compare welfare
fig, ax = plt.subplots()
ax.plot(ah_lst_tt, Eh_lst_tt, label = 'Eh_lst_tt', color = colors["c"], alpha=1)
ax.plot(ah_lst_tt, El_lst_tt, label = 'El_lst_tt', color = colors["p-g"], alpha=1)
ax.plot(ah_lst_tt, E_lst_tt, label = 'E_lst_tt', color = colors["o-y-c"], alpha=1)
ax.plot(ah_lst_pc, Eh_lst_pc, label = 'Eh_lst_pc', color = colors["c"], alpha=0.3)
ax.plot(ah_lst_pc, El_lst_pc, label = 'El_lst_pc', color = colors["p-g"], alpha=0.3)
ax.plot(ah_lst_pc, E_lst_pc, label = 'E_lst_pc', color = colors["o-y-c"], alpha=0.3)
fig, ax = plt.subplots()
ax.plot(ah_lst_tt, CS_aggregate_adjusted_lst_tt, label = 'CS_aggregate_adjusted_lst_tt', color = colors["b-s"], alpha=1)
ax.plot(ah_lst_tt, CS_aggregate_lst_tt, label = 'CS_aggregate_lst_tt', color = colors["c"], alpha=1)
ax.plot(ah_lst_pc, CS_aggregate_adjusted_lst_pc, label = 'CS_aggregate_adjusted_lst_pc', color = colors["b-s"], alpha=0.2)
ax.plot(ah_lst_pc, CS_aggregate_lst_pc, label = 'CS_aggregate_lst_pc', color = colors["c"], alpha=0.2)
fig, ax = plt.subplots()
ax.plot(ah_lst_tt, pil_lst_tt, label = 'pil_lst_tt', color = colors["c"], alpha=1)
ax.plot(ah_lst_tt, pih_lst_tt, label = 'pih_lst_tt', color = colors["p-g"], alpha=1)
ax.plot(ah_lst_pc, pil_lst_pc, label = 'pil_lst_pc', color = colors["c"], alpha=0.2)
ax.plot(ah_lst_pc, pih_lst_pc, label = 'pih_lst_pc', color = colors["p-g"], alpha=0.2)
fig, ax = plt.subplots()
ax.plot(ah_lst_tt, welfare_aggregate_adjusted_lst_tt, label = 'CS_aggregate_adjusted_lst_tt', color = colors["b-s"], alpha=1)
ax.plot(ah_lst_tt, welfare_aggregate_lst_tt, label = 'CS_aggregate_lst_tt', color = colors["c"], alpha=1)
ax.plot(ah_lst_pc, welfare_aggregate_adjusted_lst_pc, label = 'CS_aggregate_adjusted_lst_pc', color = colors["b-s"], alpha=0.2)
ax.plot(ah_lst_pc, welfare_aggregate_lst_pc, label = 'CS_aggregate_lst_pc', color = colors["c"], alpha=0.2)
#d_welfare paper
fig, ax = plt.subplots(ncols = 4, figsize = (20, 9))
#Axes1. E
ax[0].plot(ah_lst_tt, Eh_lst_tt, label = 'Eh_lst_tt', color = colors["c"], alpha=1)
ax[0].plot(ah_lst_tt, El_lst_tt, label = 'El_lst_tt', color = colors["p-g"], alpha=1)
ax[0].plot(ah_lst_tt, E_lst_tt, label = 'E_lst_tt', color = colors["o-y-c"], alpha=1)
ax[0].plot(ah_lst_pc, Eh_lst_pc, label = 'Eh_lst_pc', color = colors["c"], alpha=0.3)
ax[0].plot(ah_lst_pc, El_lst_pc, label = 'El_lst_pc', color = colors["p-g"], alpha=0.3)
ax[0].plot(ah_lst_pc, E_lst_pc, label = 'E_lst_pc', color = colors["o-y-c"], alpha=0.3)
ax[0].text(40, 1.4, "$E^{tt}$", fontsize=18)
ax[0].text(40, 0.78, "$E_h^{tt}$", fontsize=18)
ax[0].text(40, 2.5, "$E_l^{tt}$", fontsize=18)
ax[0].set_ylim(0.75, 7.5)
ax[0].set(xticks=[41, 70, 99], xticklabels=['41', '70', '99'],
yticks=[1, 7], yticklabels=['1', 'P'])
ax[0].set_xlabel('$\\theta_h$', fontsize=18)
ax[0].set_ylabel('expected price', fontsize=18)
ax[0].yaxis.set_label_coords(-0.02, 0.5)
ax[0].set_title('expected price tt vs. pc', fontsize=20)
#Axes2. CS_adjusted
ax[1].plot(ah_lst_tt, CS_aggregate_adjusted_lst_tt, label = 'CS_aggregate_adjusted_lst_tt', color = colors["b-s"], alpha=1)
ax[1].plot(ah_lst_tt, CS_aggregate_lst_tt, label = 'CS_aggregate_lst_tt', color = colors["c"], alpha=1)
ax[1].plot(ah_lst_pc, CS_aggregate_adjusted_lst_pc, label = 'CS_aggregate_adjusted_lst_pc', color = colors["b-s"], alpha=0.3)
ax[1].plot(ah_lst_pc, CS_aggregate_lst_pc, label = 'CS_aggregate_lst_pc', color = colors["c"], alpha=0.3)
ax[1].text(75, 150, "$CS^{tt}$", fontsize=18)
ax[1].text(43, 150, "$CS_{adjusted}^{tt}$", fontsize=18)
ax[1].set_ylim(0, 350)
ax[1].set(xticks=[41, 70, 99], xticklabels=['41', '70', '99'],
yticks=[0, 350], yticklabels=['0', '350'])
ax[1].set_xlabel('$\\theta_h$', fontsize=18)
ax[1].set_ylabel('CS adjusted', fontsize=18)
ax[1].yaxis.set_label_coords(-0.02, 0.5)
ax[1].set_title('CS adjusted tt vs. pc', fontsize=20)
#Axes3. profits
ax[2].plot(ah_lst_tt, pih_lst_tt, label = 'pih_lst_tt', color = colors["c"], alpha=1)
ax[2].plot(ah_lst_tt, pil_lst_tt, label = 'pil_lst_tt', color = colors["p-g"], alpha=1)
#ax[2].plot(ah_lst_pc, pih_lst_pc, label = 'pih_lst_pc', color = colors["c"], alpha=0.3)
ax[2].plot(ah_lst_pc, pil_lst_pc, label = 'pil_lst_pc', color = colors["p-g"], alpha=0.3)
ax[2].text(70, 140, "$\pi_l^{tt}$", fontsize=18)
ax[2].text(50, 140, "$\pi_h^{tt}$", fontsize=18)
ax[2].set_ylim(0, 420)
ax[2].set(xticks=[41, 70, 99], xticklabels=['41', '70', '99'],
yticks=[0, 420], yticklabels=['0', '420'])
ax[2].set_xlabel('$\\theta_h$', fontsize=18)
ax[2].set_ylabel('profits', fontsize=18)
ax[2].yaxis.set_label_coords(-0.02, 0.5)
ax[2].set_title('profits tt vs. pc', fontsize=20)
#Axes4. welfare
ax[3].plot(ah_lst_tt, welfare_aggregate_adjusted_lst_tt, label = 'CS_aggregate_adjusted_lst_tt', color = colors["b-s"], alpha=1)
ax[3].plot(ah_lst_tt, welfare_aggregate_lst_tt, label = 'CS_aggregate_lst_tt', color = colors["c"], alpha=1)
ax[3].plot(ah_lst_pc, welfare_aggregate_adjusted_lst_pc, label = 'CS_aggregate_adjusted_lst_pc', color = colors["b-s"], alpha=0.3)
ax[3].plot(ah_lst_pc, welfare_aggregate_lst_pc, label = 'CS_aggregate_lst_pc', color = colors["c"], alpha=0.3)
ax[3].text(43, 500, "$welfare^{tt}$", fontsize=18)
ax[3].text(65, 440, "$welfare_{adjusted}^{tt}$", fontsize=18)
ax[3].set(xticks=[41, 70, 99], xticklabels=['41', '70', '99'],
yticks=[250, 800], yticklabels=['250', '800'])
ax[3].set_xlabel('$\\theta_h$', fontsize=18)
ax[3].set_ylabel('welfare adjusted', fontsize=18)
ax[3].yaxis.set_label_coords(-0.02, 0.5)
ax[3].set_title('welfare adjusted tt vs. pc', fontsize=20)
plt.show()
#Heatcolor map
N2 = 100
ah_lst = np.linspace(99, 41, N2)
al_lst = np.linspace(1, 19, N2)
#Initialize tt
El_lst_tt = []
Eh_lst_tt = []
E_lst_tt = []
CS_aggregate_adjusted_lst_tt = []
pil_lst_tt = []
pih_lst_tt = []
pi_aggregate_lst_tt = []
welfare_aggregate_adjusted_lst_tt = []
#Initialize pc
El_lst_pc = []
Eh_lst_pc = []
E_lst_pc = []
CS_aggregate_adjusted_lst_pc = []
pil_lst_pc = []
pih_lst_pc = []
pi_aggregate_lst_pc = []
welfare_aggregate_adjusted_lst_pc = []
for ah in ah_lst:
#temp_lst_tt = []
temp_El_lst_tt = []
temp_Eh_lst_tt = []
temp_E_lst_tt = []
temp_CS_aggregate_adjusted_lst_tt = []
temp_pil_lst_tt = []
temp_pih_lst_tt = []
temp_pi_aggregate_lst_tt = []
temp_welfare_aggregate_adjusted_lst_tt = []
#temp_lst_pc = []
temp_El_lst_pc = []
temp_Eh_lst_pc = []
temp_E_lst_pc = []
temp_CS_aggregate_adjusted_lst_pc = []
temp_pil_lst_pc = []
temp_pih_lst_pc = []
temp_pi_aggregate_lst_pc = []
temp_welfare_aggregate_adjusted_lst_pc = []
for al in al_lst:
Eh_tt, El_tt, E_tt, CS_capita_tt, CS_capita_adjusted_tt, CS_aggregate_tt, CS_aggregate_adjusted_tt, pil_tt, pih_tt, pi_aggregate_tt, welfare_aggregate_tt, welfare_aggregate_adjusted_tt=d_simulate_model(al, ah, kl, kh, T, t, P, N, model = 'tt', auction='discriminatory')
Eh_pc, El_pc, E_pc, CS_capita_pc, CS_capita_adjusted_pc, CS_aggregate_pc, CS_aggregate_adjusted_pc, pil_pc, pih_pc, pi_aggregate_pc, welfare_aggregate_pc, welfare_aggregate_adjusted_pc=d_simulate_model(al, ah, kl, kh, T, t, P, N, model = 'pc', auction='discriminatory')
#Append tt
temp_El_lst_tt.append(El_tt)
temp_Eh_lst_tt.append(Eh_tt)
temp_E_lst_tt.append(E_tt)
temp_CS_aggregate_adjusted_lst_tt.append(CS_aggregate_adjusted_tt)
temp_pil_lst_tt.append(pil_tt)
temp_pih_lst_tt.append(pih_tt)
temp_pi_aggregate_lst_tt.append(pi_aggregate_tt)
temp_welfare_aggregate_adjusted_lst_tt.append(welfare_aggregate_adjusted_tt)
#Append pc
temp_El_lst_pc.append(El_pc)
temp_Eh_lst_pc.append(Eh_pc)
temp_E_lst_pc.append(E_pc)
temp_CS_aggregate_adjusted_lst_pc.append(CS_aggregate_adjusted_pc)
temp_pil_lst_pc.append(pil_pc)
temp_pih_lst_pc.append(pih_pc)
temp_pi_aggregate_lst_pc.append(pi_aggregate_pc)
temp_welfare_aggregate_adjusted_lst_pc.append(welfare_aggregate_adjusted_pc)
#Append lst_tt
El_lst_tt.append(temp_El_lst_tt)
Eh_lst_tt.append(temp_Eh_lst_tt)
E_lst_tt.append(temp_E_lst_tt)
CS_aggregate_adjusted_lst_tt.append(temp_CS_aggregate_adjusted_lst_tt)
pil_lst_tt.append(temp_pil_lst_tt)
pih_lst_tt.append(temp_pih_lst_tt)
pi_aggregate_lst_tt.append(temp_pi_aggregate_lst_tt)
welfare_aggregate_adjusted_lst_tt.append(temp_welfare_aggregate_adjusted_lst_tt)
#Append lst_pc
El_lst_pc.append(temp_El_lst_pc)
Eh_lst_pc.append(temp_Eh_lst_pc)
E_lst_pc.append(temp_E_lst_pc)
CS_aggregate_adjusted_lst_pc.append(temp_CS_aggregate_adjusted_lst_pc)
pil_lst_pc.append(temp_pil_lst_pc)
pih_lst_pc.append(temp_pih_lst_pc)
pi_aggregate_lst_pc.append(temp_pi_aggregate_lst_pc)
welfare_aggregate_adjusted_lst_pc.append(temp_welfare_aggregate_adjusted_lst_pc)
#Array tt
El_array_tt = np.array(El_lst_tt)
Eh_array_tt = np.array(Eh_lst_tt)
E_array_tt = np.array(E_lst_tt)
CS_aggregate_adjusted_array_tt = np.array(CS_aggregate_adjusted_lst_tt)
pil_array_tt = np.array(pil_lst_tt)
pih_array_tt = np.array(pih_lst_tt)
pi_aggregate_array_tt = np.array(pi_aggregate_lst_tt)
welfare_aggregate_adjusted_array_tt = np.array(welfare_aggregate_adjusted_lst_tt)
#Array pc
El_array_pc = np.array(El_lst_pc)
Eh_array_pc = np.array(Eh_lst_pc)
E_array_pc = np.array(E_lst_pc)
CS_aggregate_adjusted_array_pc = np.array(CS_aggregate_adjusted_lst_pc)
pil_array_pc = np.array(pil_lst_pc)
pih_array_pc = np.array(pih_lst_pc)
pi_aggregate_array_pc = np.array(pi_aggregate_lst_pc)
welfare_aggregate_adjusted_array_pc = np.array(welfare_aggregate_adjusted_lst_pc)
#Array diff
El_diff = El_array_pc - El_array_tt
Eh__diff = Eh_array_pc - Eh_array_tt
E_diff = E_array_tt -E_array_pc
#CS. Prior belief: CS_tt>CS_pc
CS_aggregate_adjusted_diff = CS_aggregate_adjusted_array_tt - CS_aggregate_adjusted_array_pc
pil_diff = pil_array_pc - pil_array_tt
pih_diif = pih_array_pc - pih_array_tt
#pi. Prior belief: pi_tt>pi_pc
pi_aggregate_diff = pi_aggregate_array_tt - pi_aggregate_array_pc
#welfare. Prior belief: welfare_tt>welfare_pc
welfare_aggregate_adjusted_diff = welfare_aggregate_adjusted_array_tt - welfare_aggregate_adjusted_array_pc
#Heat color E, CS_aggregate_adjusted, profits, welfare_adjusted
# One figure with four manually positioned heatmap panels (price, CS, profit,
# welfare differences) over the (al, ah) grid, each with its own colorbar.
import matplotlib.pyplot as plt
vmin_E = E_diff.min()
vmax_E = E_diff.max()
fig, ax = plt.subplots(ncols = 4, figsize = (20, 9))
# Panel 0: expected price difference (tt - pc).
c0 = ax[0].pcolormesh(al_lst, ah_lst, E_diff, cmap = 'viridis', vmin = vmin_E, vmax = vmax_E)
# Axes positions are laid out by hand as [left, bottom, width, height] so the
# colorbars fit between panels.
ax[0].set_position([0.05+(0.85/4)*0, 0.15, 0.15, 0.7])
ax[0].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[0].set_ylabel('$\\theta_h$', fontsize=18)
ax[0].set_xlabel('$\\theta_l$', fontsize=18)
ax[0].set_title('price (tt-pc)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.15*1)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(c0, cax=cbar_ax)
#CS_aggregate_adjusted
# Panel 1: adjusted aggregate consumer-surplus difference.
vmin_CS = CS_aggregate_adjusted_diff.min()
vmax_CS = CS_aggregate_adjusted_diff.max()
c1 = ax[1].pcolormesh(al_lst, ah_lst, CS_aggregate_adjusted_diff, cmap = 'viridis', vmin = vmin_CS, vmax = vmax_CS)
ax[1].set_position([0.05+(0.85/4)*1, 0.15, 0.15, 0.7])
ax[1].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[1].set_ylabel('$\\theta_h$', fontsize=18)
ax[1].set_xlabel('$\\theta_l$', fontsize=18)
ax[1].set_title('CS adjusted (tt-pc)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.85/4)*1+(0.15)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(c1, cax=cbar_ax)
#pi
# Panel 2: aggregate profit difference.
vmin_pi = pi_aggregate_diff.min()
vmax_pi = pi_aggregate_diff.max()
c2 = ax[2].pcolormesh(al_lst, ah_lst, pi_aggregate_diff, cmap = 'viridis', vmin = vmin_pi, vmax = vmax_pi)
ax[2].set_position([0.05+(0.85/4)*2, 0.15, 0.15, 0.7])
ax[2].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[2].set_ylabel('$\\theta_h$', fontsize=18)
ax[2].set_xlabel('$\\theta_l$', fontsize=18)
ax[2].set_title('profit (tt-pc)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.85/4)*2+(0.15)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(c2, cax=cbar_ax)
#welfare_aggregate_adjusted
# Panel 3: adjusted aggregate welfare difference.
vmin_w = welfare_aggregate_adjusted_diff.min()
vmax_w = welfare_aggregate_adjusted_diff.max()
c3 = ax[3].pcolormesh(al_lst, ah_lst, welfare_aggregate_adjusted_diff, cmap = 'viridis', vmin = vmin_w, vmax = vmax_w)
ax[3].set_position([0.05+(0.85/4)*3, 0.15, 0.15, 0.7])
ax[3].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[3].set_ylabel('$\\theta_h$', fontsize=18)
ax[3].set_xlabel('$\\theta_l$', fontsize=18)
ax[3].set_title('welfare adjusted (tt-pc)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.85/4)*3+(0.15)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(c3, cax=cbar_ax)
plt.show()
#############################
### Uniform price auction ###
#############################
##Call the functions
# Recompute strategies and welfare under the uniform-price auction (u_ prefix)
# for both the 'tt' and 'pc' models.
#Determine the area
area, area_num = determine_area(al, ah)
#d_strategies_tt
u_Fh_tt, u_Fl_tt, u_p_tt, u_b1_tt, u_b2_tt = u_strategies_tt(al, ah, kl, kh, T, t, P, N)
u_Eh_tt, u_El_tt, u_E_tt, u_CS_capita, u_CS_capita_adjusted, u_CS_aggregate, u_CS_aggregate_adjusted, u_pil, u_pih, u_pi_aggregate, u_welfare_aggregate, u_welfare_aggregate_adjusted=u_welfare_tt(u_Fh_tt, u_Fl_tt, u_p_tt, al, ah, T, t, kl, kh, P)
#d_strategies_pc
u_Fh_pc, u_Fl_pc, u_p_pc, u_b1_pc, u_b2_pc = u_strategies_pc(al, ah, kl, kh, T, t, P, N)
u_Eh_pc, u_El_pc, u_E_pc, u_CS_capita, u_CS_capita_adjusted, u_CS_aggregate, u_CS_aggregate_adjusted, u_pil, u_pih, u_pi_aggregate, u_welfare_aggregate, u_welfare_aggregate_adjusted=u_welfare_pc(u_Fh_pc, u_Fl_pc, u_p_pc, al, ah, T, t, kl, kh, P)
#Welfare tt
# NOTE(review): some unpack target names on the next lines look corrupted
# ("u_CS_aggregate_adjusteu_lst_tt", "u_CS_aggregatu_E_lst_tt") and the same
# u_plot_welfare_* calls are repeated below with corrected names, which
# overwrite these results; the first two calls appear redundant — confirm.
u_ah_lst_tt, u_Eh_lst_tt, u_El_lst_tt, u_E_lst_tt, u_CS_capita_lst_tt, u_CS_capita_adjusted_lst_tt, u_CS_aggregate_lst_tt, u_CS_aggregate_adjusteu_lst_tt, u_pil_lst_tt, u_pih_lst_tt, u_pi_aggregate_lst_tt, u_welfare_aggregate_lst_tt, u_welfare_aggregate_adjusted_lst_tt=u_plot_welfare_tt(al, kl, kh, T, t, P, N, N2)
#Welfare pc
u_ah_lst_pc, u_Eh_lst_pc, u_El_lst_pc, u_E_lst_pc, u_CS_capita_lst_pc, u_CS_capita_adjusted_lst_pc, u_CS_aggregate_lst_pc, u_CS_aggregate_adjusted_lst_pc, pil_lst_pc, pih_lst_pc, pi_aggregate_lst_pc, welfare_aggregate_lst_pc, welfare_aggregate_adjusted_lst_pc=u_plot_welfare_pc(al, kl, kh, T, t, P, N, N2)
#Compare welfare
u_ah_lst_tt, u_Eh_lst_tt, u_El_lst_tt, u_E_lst_tt, u_CS_capita_lst_tt, u_CS_capita_adjusteu_lst_tt, u_CS_aggregatu_E_lst_tt, u_CS_aggregate_adjusted_lst_tt, u_pil_lst_tt, u_pih_lst_tt, u_pi_aggregate_lst_tt, u_welfare_aggregate_lst_tt, u_welfare_aggregate_adjusted_lst_tt=u_plot_welfare_tt(al, kl, kh, T, t, P, N, N2)
u_ah_lst_pc, u_Eh_lst_pc, u_El_lst_pc, u_E_lst_pc, u_CS_capita_lst_pc, u_CS_capita_adjusted_lst_pc, u_CS_aggregate_lst_pc, u_CS_aggregate_adjusted_lst_pc, u_pil_lst_pc, u_pih_lst_pc, u_pi_aggregate_lst_pc, u_welfare_aggregate_lst_pc, u_welfare_aggregate_adjusted_lst_pc=u_plot_welfare_pc(al, kl, kh, T, t, P, N, N2)
########################################
### Uniform price auction: tt vs. pc ###
########################################
##Plot the functions
# Line plots for the uniform-price auction: tt curves at full opacity,
# pc curves at alpha=0.2 so the overlays are distinguishable.
import matplotlib.pyplot as plt
#u_strategies_tt
# Bid distributions Fh/Fl over price, plus vertical lines at the expected
# prices Eh/El/E for the 'tt' model.
fig, ax = plt.subplots()
ax.plot(u_p_tt, u_Fh_tt, label = 'u_Fh_tt', color = colors["c"])
ax.plot(u_p_tt, u_Fl_tt, label = 'Fl_tt', color = colors["p-g"])
ax.plot([u_Eh_tt,u_Eh_tt], [0,1], label = 'u_Eh_tt', color = colors["c"])
ax.plot([u_El_tt,u_El_tt], [0,1], label = 'u_El_tt', color = colors["p-g"])
ax.plot([u_E_tt,u_E_tt], [0,1], label = 'E_tt', color = colors["o-y-c"])
#u_strategies_pc
fig, ax = plt.subplots()
ax.plot(u_p_pc, u_Fh_pc, label = 'u_Fh_pc', color = colors["c"], alpha=1)
ax.plot(u_p_pc, u_Fl_pc, label = 'Fl_pc', color = colors["p-g"], alpha=1)
ax.plot([u_Eh_pc,u_Eh_pc], [0,1], label = 'u_Eh_pc', color = colors["c"], alpha=1)
ax.plot([u_El_pc,u_El_pc], [0,1], label = 'u_El_pc', color = colors["p-g"], alpha=1)
ax.plot([u_E_pc,u_E_pc], [0,1], label = 'E_pc', color = colors["o-y-c"], alpha=1)
#u_strategies comparison
# tt and pc strategies overlaid in one figure.
fig, ax = plt.subplots()
ax.plot(u_p_tt, u_Fh_tt, label = 'u_Fh_tt', color = colors["c"])
ax.plot(u_p_tt, u_Fl_tt, label = 'u_Fl_tt', color = colors["p-g"])
ax.plot([u_Eh_tt,u_Eh_tt], [0,1], label = 'u_Eh_tt', color = colors["c"])
ax.plot([u_El_tt,u_El_tt], [0,1], label = 'u_El_tt', color = colors["p-g"])
ax.plot([u_E_tt,u_E_tt], [0,1], label = 'E_tt', color = colors["o-y-c"])
ax.plot(u_p_pc, u_Fh_pc, label = 'u_Fh_pc', color = colors["c"], alpha=0.2)
ax.plot(u_p_pc, u_Fl_pc, label = 'u_Fl_pc', color = colors["p-g"], alpha=0.2)
ax.plot([u_Eh_pc,u_Eh_pc], [0,1], label = 'u_Eh_pc', color = colors["c"], alpha=0.2)
ax.plot([u_El_pc,u_El_pc], [0,1], label = 'u_El_pc', color = colors["p-g"], alpha=0.2)
ax.plot([u_E_pc,u_E_pc], [0,1], label = 'E_pc', color = colors["o-y-c"], alpha=0.2)
#Welfare tt
# Welfare quantities as a function of ah for the 'tt' model (one figure per
# group of related series).
fig, ax = plt.subplots()
ax.plot(u_ah_lst_tt, u_Eh_lst_tt, label = 'u_Eh_lst_tt', color = colors["c"], alpha=1)
ax.plot(u_ah_lst_tt, u_El_lst_tt, label = 'u_El_lst_tt', color = colors["p-g"], alpha=1)
ax.plot(u_ah_lst_tt, u_E_lst_tt, label = 'u_E_lst_tt', color = colors["o-y-c"], alpha=1)
fig, ax = plt.subplots()
ax.plot(u_ah_lst_tt, u_CS_aggregate_adjusted_lst_tt, label = 'u_CS_aggregate_adjusted_lst_tt', color = colors["b-s"], alpha=1)
ax.plot(u_ah_lst_tt, u_CS_aggregate_lst_tt, label = 'u_CS_aggregate_lst_tt', color = colors["c"], alpha=1)
fig, ax = plt.subplots()
ax.plot(u_ah_lst_tt, u_pil_lst_tt, label = 'u_pil_lst_tt', color = colors["c"], alpha=1)
ax.plot(u_ah_lst_tt, u_pih_lst_tt, label = 'u_pih_lst_tt', color = colors["p-g"], alpha=1)
fig, ax = plt.subplots()
# NOTE(review): the labels on the next two lines say "CS" but plot welfare
# series — likely a copy-paste slip in the legend text only.
ax.plot(u_ah_lst_tt, u_welfare_aggregate_adjusted_lst_tt, label = 'u_CS_aggregate_adjusted_lst_tt', color = colors["b-s"], alpha=1)
ax.plot(u_ah_lst_tt, u_welfare_aggregate_lst_tt, label = 'u_CS_aggregate_lst_tt', color = colors["c"], alpha=1)
#Welfare pc
fig, ax = plt.subplots()
ax.plot(u_ah_lst_pc, u_Eh_lst_pc, label = 'u_Eh_lst_pc', color = colors["c"], alpha=0.2)
ax.plot(u_ah_lst_pc, u_El_lst_pc, label = 'u_El_lst_pc', color = colors["p-g"], alpha=0.2)
ax.plot(u_ah_lst_pc, u_E_lst_pc, label = 'u_E_lst_pc', color = colors["o-y-c"], alpha=0.2)
fig, ax = plt.subplots()
ax.plot(u_ah_lst_pc, u_CS_aggregate_adjusted_lst_pc, label = 'u_CS_aggregate_adjusted_lst_pc', color = colors["b-s"], alpha=0.2)
ax.plot(u_ah_lst_pc, u_CS_aggregate_lst_pc, label = 'u_CS_aggregate_lst_pc', color = colors["c"], alpha=0.2)
fig, ax = plt.subplots()
ax.plot(u_ah_lst_pc, u_pil_lst_pc, label = 'u_pil_lst_pc', color = colors["c"], alpha=0.2)
ax.plot(u_ah_lst_pc, u_pih_lst_pc, label = 'u_pih_lst_pc', color = colors["p-g"], alpha=0.2)
fig, ax = plt.subplots()
# NOTE(review): "welfare_aggregate_adjusted_lst_pc" here lacks the u_ prefix
# used elsewhere in this section — confirm it refers to the intended
# (uniform-auction) series and not the discriminatory one.
ax.plot(u_ah_lst_pc, welfare_aggregate_adjusted_lst_pc, label = 'u_CS_aggregate_adjusted_lst_pc', color = colors["b-s"], alpha=0.2)
ax.plot(u_ah_lst_pc, u_welfare_aggregate_lst_pc, label = 'u_CS_aggregate_lst_pc', color = colors["c"], alpha=0.2)
#Compare u_welfare
# Overlays of the tt (alpha=1) and pc (alpha=0.2) welfare series.
fig, ax = plt.subplots()
ax.plot(u_ah_lst_tt, u_Eh_lst_tt, label = 'u_Eh_lst_tt', color = colors["c"], alpha=1)
ax.plot(u_ah_lst_tt, u_El_lst_tt, label = 'u_El_lst_tt', color = colors["p-g"], alpha=1)
ax.plot(u_ah_lst_tt, u_E_lst_tt, label = 'u_E_lst_tt', color = colors["o-y-c"], alpha=1)
ax.plot(u_ah_lst_pc, u_Eh_lst_pc, label = 'u_Eh_lst_pc', color = colors["c"], alpha=0.2)
ax.plot(u_ah_lst_pc, u_El_lst_pc, label = 'u_El_lst_pc', color = colors["p-g"], alpha=0.2)
ax.plot(u_ah_lst_pc, u_E_lst_pc, label = 'u_E_lst_pc', color = colors["o-y-c"], alpha=0.2)
fig, ax = plt.subplots()
ax.plot(u_ah_lst_tt, u_CS_aggregate_adjusted_lst_tt, label = 'u_CS_aggregate_adjusted_lst_tt', color = colors["b-s"], alpha=1)
ax.plot(u_ah_lst_tt, u_CS_aggregate_lst_tt, label = 'u_CS_aggregate_lst_tt', color = colors["c"], alpha=1)
ax.plot(u_ah_lst_pc, u_CS_aggregate_adjusted_lst_pc, label = 'u_CS_aggregate_adjusted_lst_pc', color = colors["b-s"], alpha=0.2)
ax.plot(u_ah_lst_pc, u_CS_aggregate_lst_pc, label = 'u_CS_aggregate_lst_pc', color = colors["c"], alpha=0.2)
fig, ax = plt.subplots()
ax.plot(u_ah_lst_tt, u_pil_lst_tt, label = 'u_pil_lst_tt', color = colors["c"], alpha=1)
ax.plot(u_ah_lst_tt, u_pih_lst_tt, label = 'u_pih_lst_tt', color = colors["p-g"], alpha=1)
ax.plot(u_ah_lst_pc, u_pil_lst_pc, label = 'u_pil_lst_pc', color = colors["c"], alpha=0.2)
ax.plot(u_ah_lst_pc, u_pih_lst_pc, label = 'u_pih_lst_pc', color = colors["p-g"], alpha=0.2)
fig, ax = plt.subplots()
ax.plot(u_ah_lst_tt, u_welfare_aggregate_adjusted_lst_tt, label = 'u_CS_aggregate_adjusted_lst_tt', color = colors["b-s"], alpha=1)
ax.plot(u_ah_lst_tt, u_welfare_aggregate_lst_tt, label = 'u_CS_aggregate_lst_tt', color = colors["c"], alpha=1)
ax.plot(u_ah_lst_pc, u_welfare_aggregate_adjusted_lst_pc, label = 'u_CS_aggregate_adjusted_lst_pc', color = colors["b-s"], alpha=0.2)
ax.plot(u_ah_lst_pc, u_welfare_aggregate_lst_pc, label = 'u_CS_aggregate_lst_pc', color = colors["c"], alpha=0.2)
#Heatcolor map
# Build an N2 x N2 grid of (ah, al) parameter pairs and initialize the
# accumulator lists for the uniform-price heatmaps. Note ah descends
# (99 -> 41) while al ascends (1 -> 19).
N2 = 100
u_ah_lst = np.linspace(99, 41, N2)
u_al_lst = np.linspace(1, 19, N2)
#Initialize tt
u_El_lst_tt = []
u_Eh_lst_tt = []
u_E_lst_tt = []
u_CS_aggregate_adjusted_lst_tt = []
u_pil_lst_tt = []
u_pih_lst_tt = []
u_pi_aggregate_lst_tt = []
u_welfare_aggregate_adjusted_lst_tt = []
#Initialize pc
u_El_lst_pc = []
u_Eh_lst_pc = []
u_E_lst_pc = []
u_CS_aggregate_adjusted_lst_pc = []
u_pil_lst_pc = []
u_pih_lst_pc = []
u_pi_aggregate_lst_pc = []
u_welfare_aggregate_adjusted_lst_pc = []
# Sweep the (ah, al) grid: for each pair, run the uniform-price auction
# simulation under both models and collect the results row by row.
# NOTE(review): indentation appears to have been lost in this file — the
# statements below logically belong inside the two for-loops (the "#Append
# lst_*" section inside the outer loop only), matching the discriminatory
# version of this sweep earlier in the file.
for ah in u_ah_lst:
#temp_lst_tt = []
u_temp_El_lst_tt = []
u_temp_Eh_lst_tt = []
u_temp_E_lst_tt = []
u_temp_CS_aggregate_adjusted_lst_tt = []
u_temp_pil_lst_tt = []
u_temp_pih_lst_tt = []
u_temp_pi_aggregate_lst_tt = []
u_temp_welfare_aggregate_adjusted_lst_tt = []
#u_temp_lst_pc = []
u_temp_El_lst_pc = []
u_temp_Eh_lst_pc = []
u_temp_E_lst_pc = []
u_temp_CS_aggregate_adjusted_lst_pc = []
u_temp_pil_lst_pc = []
u_temp_pih_lst_pc = []
u_temp_pi_aggregate_lst_pc = []
u_temp_welfare_aggregate_adjusted_lst_pc = []
for al in u_al_lst:
u_Eh_tt, u_El_tt, u_E_tt, u_CS_capita_tt, u_CS_capita_adjusted_tt, u_CS_aggregate_tt, u_CS_aggregate_adjusted_tt, u_pil_tt, u_pih_tt, u_pi_aggregate_tt, u_welfare_aggregate_tt, u_welfare_aggregate_adjusted_tt=d_simulate_model(al, ah, kl, kh, T, t, P, N, model = 'tt', auction='uniform')
u_Eh_pc, u_El_pc, u_E_pc, u_CS_capita_pc, u_CS_capita_adjusted_pc, u_CS_aggregate_pc, u_CS_aggregate_adjusted_pc, u_pil_pc, u_pih_pc, u_pi_aggregate_pc, u_welfare_aggregate_pc, u_welfare_aggregate_adjusted_pc=d_simulate_model(al, ah, kl, kh, T, t, P, N, model = 'pc', auction='uniform')
#Append tt
u_temp_El_lst_tt.append(u_El_tt)
u_temp_Eh_lst_tt.append(u_Eh_tt)
u_temp_E_lst_tt.append(u_E_tt)
u_temp_CS_aggregate_adjusted_lst_tt.append(u_CS_aggregate_adjusted_tt)
u_temp_pil_lst_tt.append(u_pil_tt)
u_temp_pih_lst_tt.append(u_pih_tt)
u_temp_pi_aggregate_lst_tt.append(u_pi_aggregate_tt)
u_temp_welfare_aggregate_adjusted_lst_tt.append(u_welfare_aggregate_adjusted_tt)
#Append pc
u_temp_El_lst_pc.append(u_El_pc)
u_temp_Eh_lst_pc.append(u_Eh_pc)
u_temp_E_lst_pc.append(u_E_pc)
u_temp_CS_aggregate_adjusted_lst_pc.append(u_CS_aggregate_adjusted_pc)
u_temp_pil_lst_pc.append(u_pil_pc)
u_temp_pih_lst_pc.append(u_pih_pc)
u_temp_pi_aggregate_lst_pc.append(u_pi_aggregate_pc)
u_temp_welfare_aggregate_adjusted_lst_pc.append(u_welfare_aggregate_adjusted_pc)
#Append lst_tt
u_El_lst_tt.append(u_temp_El_lst_tt)
u_Eh_lst_tt.append(u_temp_Eh_lst_tt)
u_E_lst_tt.append(u_temp_E_lst_tt)
u_CS_aggregate_adjusted_lst_tt.append(u_temp_CS_aggregate_adjusted_lst_tt)
u_pil_lst_tt.append(u_temp_pil_lst_tt)
u_pih_lst_tt.append(u_temp_pih_lst_tt)
u_pi_aggregate_lst_tt.append(u_temp_pi_aggregate_lst_tt)
u_welfare_aggregate_adjusted_lst_tt.append(u_temp_welfare_aggregate_adjusted_lst_tt)
#Append lst_pc
u_El_lst_pc.append(u_temp_El_lst_pc)
u_Eh_lst_pc.append(u_temp_Eh_lst_pc)
u_E_lst_pc.append(u_temp_E_lst_pc)
u_CS_aggregate_adjusted_lst_pc.append(u_temp_CS_aggregate_adjusted_lst_pc)
u_pil_lst_pc.append(u_temp_pil_lst_pc)
u_pih_lst_pc.append(u_temp_pih_lst_pc)
u_pi_aggregate_lst_pc.append(u_temp_pi_aggregate_lst_pc)
u_welfare_aggregate_adjusted_lst_pc.append(u_temp_welfare_aggregate_adjusted_lst_pc)
#Array tt
# Convert the uniform-price result lists into 2-D arrays, mirroring the
# discriminatory-auction section above.
u_El_array_tt = np.array(u_El_lst_tt)
u_Eh_array_tt = np.array(u_Eh_lst_tt)
u_E_array_tt = np.array(u_E_lst_tt)
u_CS_aggregate_adjusted_array_tt = np.array(u_CS_aggregate_adjusted_lst_tt)
u_pil_array_tt = np.array(u_pil_lst_tt)
u_pih_array_tt = np.array(u_pih_lst_tt)
u_pi_aggregate_array_tt = np.array(u_pi_aggregate_lst_tt)
u_welfare_aggregate_adjusted_array_tt = np.array(u_welfare_aggregate_adjusted_lst_tt)
#Array pc
u_El_array_pc = np.array(u_El_lst_pc)
u_Eh_array_pc = np.array(u_Eh_lst_pc)
u_E_array_pc = np.array(u_E_lst_pc)
u_CS_aggregate_adjusted_array_pc = np.array(u_CS_aggregate_adjusted_lst_pc)
u_pil_array_pc = np.array(u_pil_lst_pc)
u_pih_array_pc = np.array(u_pih_lst_pc)
u_pi_aggregate_array_pc = np.array(u_pi_aggregate_lst_pc)
u_welfare_aggregate_adjusted_array_pc = np.array(u_welfare_aggregate_adjusted_lst_pc)
#Array diff
# tt-vs-pc differences for the uniform-price auction. NOTE(review):
# "u_Eh__diff" and "u_pih_diif" repeat the typos from the discriminatory
# section; both appear unused later in this file.
u_El_diff = u_El_array_pc - u_El_array_tt
u_Eh__diff = u_Eh_array_pc - u_Eh_array_tt
u_E_diff = u_E_array_tt -u_E_array_pc
#CS. Prior belief: CS_tt>CS_pc
u_CS_aggregate_adjusted_diff = u_CS_aggregate_adjusted_array_tt - u_CS_aggregate_adjusted_array_pc
u_pil_diff = u_pil_array_pc - u_pil_array_tt
u_pih_diif = u_pih_array_pc - u_pih_array_tt
#pi. Prior belief: pi_tt>pi_pc
u_pi_aggregate_diff = u_pi_aggregate_array_tt - u_pi_aggregate_array_pc
#welfare. Prior belief: welfare_tt>welfare_pc
u_welfare_aggregate_adjusted_diff = u_welfare_aggregate_adjusted_array_tt - u_welfare_aggregate_adjusted_array_pc
#Heat color E, CS_aggregate_adjusted,
# Four-panel heatmap of the uniform-price tt-vs-pc differences, same layout
# as the discriminatory-auction figure above.
u_vmin_E = u_E_diff.min()
u_vmax_E = u_E_diff.max()
fig, ax = plt.subplots(ncols = 4, figsize = (20, 9))
u_c0 = ax[0].pcolormesh(u_al_lst, u_ah_lst, u_E_diff, cmap = 'viridis', vmin = u_vmin_E, vmax = u_vmax_E)
ax[0].set_position([0.05+(0.85/4)*0, 0.15, 0.15, 0.7])
ax[0].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[0].set_ylabel('$\\theta_h$', fontsize=18)
ax[0].set_xlabel('$\\theta_l$', fontsize=18)
ax[0].set_title('price (tt-pc)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.15*1)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(u_c0, cax=cbar_ax)
#CS_aggregate_adjusted
u_vmin_CS = u_CS_aggregate_adjusted_diff.min()
u_vmax_CS = u_CS_aggregate_adjusted_diff.max()
u_c1 = ax[1].pcolormesh(u_al_lst, u_ah_lst, u_CS_aggregate_adjusted_diff, cmap = 'viridis', vmin = u_vmin_CS, vmax = u_vmax_CS)
ax[1].set_position([0.05+(0.85/4)*1, 0.15, 0.15, 0.7])
ax[1].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[1].set_ylabel('$\\theta_h$', fontsize=18)
ax[1].set_xlabel('$\\theta_l$', fontsize=18)
ax[1].set_title('CS adjusted (tt-pc)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.85/4)*1+(0.15)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(u_c1, cax=cbar_ax)
#pi
u_vmin_pi = u_pi_aggregate_diff.min()
u_vmax_pi = u_pi_aggregate_diff.max()
u_c2 = ax[2].pcolormesh(u_al_lst, u_ah_lst, u_pi_aggregate_diff, cmap = 'viridis', vmin = u_vmin_pi, vmax = u_vmax_pi)
ax[2].set_position([0.05+(0.85/4)*2, 0.15, 0.15, 0.7])
ax[2].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[2].set_ylabel('$\\theta_h$', fontsize=18)
ax[2].set_xlabel('$\\theta_l$', fontsize=18)
ax[2].set_title('profit (tt-pc)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.85/4)*2+(0.15)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(u_c2, cax=cbar_ax)
#welfare_aggregate_adjusted
u_vmin_w = u_welfare_aggregate_adjusted_diff.min()
u_vmax_w = u_welfare_aggregate_adjusted_diff.max()
u_c3 = ax[3].pcolormesh(u_al_lst, u_ah_lst, u_welfare_aggregate_adjusted_diff, cmap = 'viridis', vmin = u_vmin_w, vmax = u_vmax_w)
ax[3].set_position([0.05+(0.85/4)*3, 0.15, 0.15, 0.7])
ax[3].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[3].set_ylabel('$\\theta_h$', fontsize=18)
ax[3].set_xlabel('$\\theta_l$', fontsize=18)
ax[3].set_title('welfare adjusted (tt-pc)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.85/4)*3+(0.15)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(u_c3, cax=cbar_ax)
plt.show()
####################################################
### Discriminatory vs uniform price auction (tt) ###
####################################################
# Differences between the uniform-price (u_ prefix) and discriminatory
# (no prefix) auction outcomes for the 'tt' model: uniform minus
# discriminatory, element-wise over the (ah, al) grid.
#Array diff uniform tt - discriminatory tt
du_El_diff = u_El_array_tt - El_array_tt
du_Eh__diff = u_Eh_array_tt - Eh_array_tt
du_E_diff = u_E_array_tt - E_array_tt
#CS. Prior belief: CS_tt>CS_pc
du_CS_aggregate_adjusted_diff = u_CS_aggregate_adjusted_array_tt - CS_aggregate_adjusted_array_tt
du_pil_diff = u_pil_array_tt - pil_array_tt
# Bug fix: this line previously read "u_pih_array_tt - u_pih_array_tt",
# subtracting the array from itself (identically zero). Every sibling line
# compares the uniform result against the discriminatory one, so do the same.
du_pih_diff = u_pih_array_tt - pih_array_tt
#pi. Prior belief: pi_tt>pi_pc
du_pi_aggregate_diff = u_pi_aggregate_array_tt - pi_aggregate_array_tt
#welfare. Prior belief: welfare_tt>welfare_pc
du_welfare_aggregate_adjusted_diff = u_welfare_aggregate_adjusted_array_tt - welfare_aggregate_adjusted_array_tt
#Heat color E, CS_aggregate_adjusted, profits, welfare_adjusted
# Four-panel heatmap of uniform-minus-discriminatory differences ("u-d")
# under the 'tt' model, laid out like the earlier figures.
import matplotlib.pyplot as plt
du_vmin_E = du_E_diff.min()
du_vmax_E = du_E_diff.max()
fig, ax = plt.subplots(ncols = 4, figsize = (20, 9))
du_c0 = ax[0].pcolormesh(al_lst, ah_lst, du_E_diff, cmap = 'viridis', vmin = du_vmin_E, vmax = du_vmax_E)
ax[0].set_position([0.05+(0.85/4)*0, 0.15, 0.15, 0.7])
ax[0].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[0].set_ylabel('$\\theta_h$', fontsize=18)
ax[0].set_xlabel('$\\theta_l$', fontsize=18)
ax[0].set_title('price (u-d)(tt)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.15*1)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(du_c0, cax=cbar_ax)
#CS_aggregate_adjusted
du_vmin_CS = du_CS_aggregate_adjusted_diff.min()
du_vmax_CS = du_CS_aggregate_adjusted_diff.max()
du_c1 = ax[1].pcolormesh(al_lst, ah_lst, du_CS_aggregate_adjusted_diff, cmap = 'viridis', vmin = du_vmin_CS, vmax = du_vmax_CS)
ax[1].set_position([0.05+(0.85/4)*1, 0.15, 0.15, 0.7])
ax[1].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[1].set_ylabel('$\\theta_h$', fontsize=18)
ax[1].set_xlabel('$\\theta_l$', fontsize=18)
ax[1].set_title('CS adjusted (u-d)(tt)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.85/4)*1+(0.15)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(du_c1, cax=cbar_ax)
#pi
du_vmin_pi = du_pi_aggregate_diff.min()
du_vmax_pi = du_pi_aggregate_diff.max()
du_c2 = ax[2].pcolormesh(al_lst, ah_lst, du_pi_aggregate_diff, cmap = 'viridis', vmin = du_vmin_pi, vmax = du_vmax_pi)
ax[2].set_position([0.05+(0.85/4)*2, 0.15, 0.15, 0.7])
ax[2].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[2].set_ylabel('$\\theta_h$', fontsize=18)
ax[2].set_xlabel('$\\theta_l$', fontsize=18)
ax[2].set_title('profit (u-d)(tt)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.85/4)*2+(0.15)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(du_c2, cax=cbar_ax)
#welfare_aggregate_adjusted
du_vmin_w = du_welfare_aggregate_adjusted_diff.min()
du_vmax_w = du_welfare_aggregate_adjusted_diff.max()
du_c3 = ax[3].pcolormesh(al_lst, ah_lst, du_welfare_aggregate_adjusted_diff, cmap = 'viridis', vmin = du_vmin_w, vmax = du_vmax_w)
ax[3].set_position([0.05+(0.85/4)*3, 0.15, 0.15, 0.7])
ax[3].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[3].set_ylabel('$\\theta_h$', fontsize=18)
ax[3].set_xlabel('$\\theta_l$', fontsize=18)
ax[3].set_title('welfare adjusted (u-d)(tt)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.85/4)*3+(0.15)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(du_c3, cax=cbar_ax)
plt.show()
####################################################
### Discriminatory vs uniform price auction (pc) ###
####################################################
# Differences between the uniform-price (u_ prefix) and discriminatory
# (no prefix) auction outcomes for the 'pc' model: uniform minus
# discriminatory. (The original comment here said "tt" — a stale copy.)
#Array diff uniform pc - discriminatory pc
du_El_diff_pc = u_El_array_pc - El_array_pc
du_Eh__diff_pc = u_Eh_array_pc - Eh_array_pc
du_E_diff_pc = u_E_array_pc - E_array_pc
#CS. Prior belief: CS_tt>CS_pc
du_CS_aggregate_adjusted_diff_pc = u_CS_aggregate_adjusted_array_pc - CS_aggregate_adjusted_array_pc
du_pil_diff_pc = u_pil_array_pc - pil_array_pc
# Bug fix: this line previously read "u_pih_array_pc - u_pih_array_pc",
# subtracting the array from itself (identically zero). Compare the
# uniform-auction result against the discriminatory one like its siblings.
du_pih_diff_pc = u_pih_array_pc - pih_array_pc
#pi. Prior belief: pi_tt>pi_pc
du_pi_aggregate_diff_pc = u_pi_aggregate_array_pc - pi_aggregate_array_pc
#welfare. Prior belief: welfare_tt>welfare_pc
du_welfare_aggregate_adjusted_diff_pc = u_welfare_aggregate_adjusted_array_pc - welfare_aggregate_adjusted_array_pc
#Heat color E, CS_aggregate_adjusted, profits, welfare_adjusted
# Four-panel heatmap of uniform-minus-discriminatory differences ("u-d")
# under the 'pc' model; same manual layout as the previous figures.
import matplotlib.pyplot as plt
du_vmin_E_pc = du_E_diff_pc.min()
du_vmax_E_pc = du_E_diff_pc.max()
fig, ax = plt.subplots(ncols = 4, figsize = (20, 9))
du_c0_pc = ax[0].pcolormesh(al_lst, ah_lst, du_E_diff_pc, cmap = 'viridis', vmin = du_vmin_E_pc, vmax = du_vmax_E_pc)
ax[0].set_position([0.05+(0.85/4)*0, 0.15, 0.15, 0.7])
ax[0].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[0].set_ylabel('$\\theta_h$', fontsize=18)
ax[0].set_xlabel('$\\theta_l$', fontsize=18)
ax[0].set_title('price (u-d)(pc)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.15*1)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(du_c0_pc, cax=cbar_ax)
#CS_aggregate_adjusted
du_vmin_CS_pc = du_CS_aggregate_adjusted_diff_pc.min()
du_vmax_CS_pc = du_CS_aggregate_adjusted_diff_pc.max()
du_c1_pc = ax[1].pcolormesh(al_lst, ah_lst, du_CS_aggregate_adjusted_diff_pc, cmap = 'viridis', vmin = du_vmin_CS_pc, vmax = du_vmax_CS_pc)
ax[1].set_position([0.05+(0.85/4)*1, 0.15, 0.15, 0.7])
ax[1].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[1].set_ylabel('$\\theta_h$', fontsize=18)
ax[1].set_xlabel('$\\theta_l$', fontsize=18)
ax[1].set_title('CS adjusted (u-d)(pc)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.85/4)*1+(0.15)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(du_c1_pc, cax=cbar_ax)
#pi
du_vmin_pi_pc = du_pi_aggregate_diff_pc.min()
du_vmax_pi_pc = du_pi_aggregate_diff_pc.max()
du_c2_pc = ax[2].pcolormesh(al_lst, ah_lst, du_pi_aggregate_diff_pc, cmap = 'viridis', vmin = du_vmin_pi_pc, vmax = du_vmax_pi_pc)
ax[2].set_position([0.05+(0.85/4)*2, 0.15, 0.15, 0.7])
ax[2].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[2].set_ylabel('$\\theta_h$', fontsize=18)
ax[2].set_xlabel('$\\theta_l$', fontsize=18)
ax[2].set_title('profit (u-d)(pc)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.85/4)*2+(0.15)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(du_c2_pc, cax=cbar_ax)
#welfare_aggregate_adjusted
du_vmin_w_pc = du_welfare_aggregate_adjusted_diff_pc.min()
du_vmax_w_pc = du_welfare_aggregate_adjusted_diff_pc.max()
du_c3_pc = ax[3].pcolormesh(al_lst, ah_lst, du_welfare_aggregate_adjusted_diff_pc, cmap = 'viridis', vmin = du_vmin_w_pc, vmax = du_vmax_w_pc)
ax[3].set_position([0.05+(0.85/4)*3, 0.15, 0.15, 0.7])
ax[3].set(xticks=[1, 5, 10, 15, 19], xticklabels=['1', '5', '10', '15', '19'],
yticks=[41, 50, 60, 70, 80, 90, 99], yticklabels=['41', '50', '60', '70', '80', '90', '99'])
ax[3].set_ylabel('$\\theta_h$', fontsize=18)
ax[3].set_xlabel('$\\theta_l$', fontsize=18)
ax[3].set_title('welfare adjusted (u-d)(pc)', fontsize=20)
cbar_ax = fig.add_axes([0.05+(0.85/4)*3+(0.15)+0.01, 0.15, 0.01, 0.7])
fig.colorbar(du_c3_pc, cax=cbar_ax)
plt.show()
| StarcoderdataPython |
180385 | <reponame>fvenya7/examen
from tkinter import Tk,Frame,Label,Button,Entry,Scale,StringVar,IntVar,Toplevel,ttk
import tkinter as tk
import catalogo
from editar_excel import list1
# Main application window: offers catalogue access and a simple five-row
# point-of-sale ("Caja") dialog that totals product prices by code.
# NOTE(review): indentation appears to have been lost in this file; the
# method bodies below logically belong inside the class/defs.
class Ventana_Principal(Frame):
def __init__(self,master=None):
super().__init__(master, width=600, height=400)
self.master=master
self.pack()
# NOTE(review): crea_widgets() is called before the StringVar/list
# attributes below are created — it does not use them today, but the
# ordering is fragile; confirm before reordering.
self.crea_widgets()
self.nombre= tk.StringVar()
self.stock= tk.StringVar()
self.precio= tk.StringVar()
self.codigo= tk.StringVar()
self.lista_nombres = []
self.lista_precio = []
self.lista_codigo = []
self.lista_stock = []
self.sumatotal=tk.StringVar()
# Open the point-of-sale window: five product rows (code entry, looked-up
# name/price/stock, quantity entry, subtotal) plus a total and a
# "CALCULAR" button. NOTE(review): each call appends five more StringVars
# to the per-row lists created in __init__ — reopening the window grows
# them; confirm whether Caja is meant to be opened only once.
def Caja(self):
self.caja = Toplevel()
self.caja.geometry("910x500")
tk.Label(self.caja, text="INGRESE LOS CODIGOS DEL LOS PRODUCTOS",bg="light yellow", font=("Verdana",18)).place(x=30,y=10,width=850, height=50)
tk.Label(self.caja, text="CODIGO",bg="black",fg="white").place(x=30,y=70,width=120, height=30)
tk.Label(self.caja, text="NOMBRE",bg="black",fg="white").place(x=155,y=70,width=200, height=30)
tk.Label(self.caja, text="PRECIO",bg="black",fg="white").place(x=360,y=70,width=100, height=30)
tk.Label(self.caja, text="STOCK",bg="black",fg="white").place(x=465,y=70,width=100, height=30)
tk.Label(self.caja, text="VERIFICAR",bg="black",fg="white").place(x=570,y=70,width=100, height=30)
tk.Label(self.caja, text="CANTIDAD",bg="black",fg="white").place(x=675,y=70,width=100, height=30)
tk.Label(self.caja, text="SUBTOTAL",bg="black",fg="white").place(x=780,y=70,width=100, height=30)
tk.Label(self.caja, textvariable=self.sumatotal).place(x=780,y=330,width=100, height=30)
i=0
self.botoncalc = tk.Button(self.caja, text="CALCULAR",command=self.Calcular)
self.botoncalc.place(x=780,y=440,width=100, height=30)
self.lista_entradas = []
self.lista_botones = []
self.lista_cant = []
self.lista_subtotal = []
# Build the five rows; the default argument (a = i) binds the row index
# at definition time so each button looks up its own row.
for i in range(5):
self.lista_entradas.append(tk.Entry(self.caja))
self.lista_entradas[i].place_configure(x=30,y=110+40*i,width=100,height=30)
self.lista_botones.append(tk.Button(self.caja, text="CLICK",command=lambda a = i: self.Encontrar_nombre(a),bg="lime green"))
self.lista_botones[i].place_configure(x=570,y=110+40*i,width=100,height=30)
self.lista_cant.append(tk.Entry(self.caja))
self.lista_cant[i].place_configure(x=675,y=110+40*i,width=100, height=30)
self.lista_nombres.append(tk.StringVar())
self.lista_precio.append(tk.StringVar())
self.lista_stock.append(tk.StringVar())
self.lista_subtotal.append(tk.StringVar())
tk.Label(self.caja,textvariable=self.lista_nombres[i],bg="LightSkyBlue3").place(x=155,y=110+40*i,width=200, height=30)
tk.Label(self.caja,textvariable=self.lista_precio[i],bg="LightSkyBlue3").place(x=360,y=110+40*i,width=100, height=30)
tk.Label(self.caja,textvariable=self.lista_stock[i],bg="LightSkyBlue3").place(x=465,y=110+40*i,width=100, height=30)
tk.Label(self.caja,textvariable=self.lista_subtotal[i],bg="LightSkyBlue3").place(x=780,y=110+40*i,width=100, height=30)
# Map row index -> code Entry widget for Encontrar_nombre.
self.diccionario = dict(zip([0,1,2,3,4],self.lista_entradas))
# Look up the product whose code (column 1 of each list1 record) matches
# the code typed in row `numero`, and fill in that row's name (col 0),
# price (col 3) and stock (col 5) labels. No-op on an empty entry.
def Encontrar_nombre(self,numero):
if self.diccionario[numero].get() != "":
for i in list1:
if i[1] == self.diccionario[numero].get():
self.lista_nombres[numero].set(i[0])
self.lista_precio[numero].set(i[3])
self.lista_stock[numero].set(i[5])
# Compute each row's subtotal (quantity * price) and the running total.
# Blank or non-numeric cells count as zero.
def Calcular(self):
k = 0
a,b,suma = 0.0,0.0,0.0
for i in self.lista_subtotal:
# NOTE(review): the bare except swallows all errors (not just
# ValueError), and sumatotal.set() runs every iteration — harmless
# but only the last call matters; worth tightening in a follow-up.
try:
a = float(self.lista_cant[k].get())
b = float(self.lista_precio[k].get())
except:
a,b = 0.0,0.0
pass
if self.lista_cant[k].get() == "":
a = 0.0
if self.lista_precio[k].get() == "":
b = 0.0
k += 1
suma+=a*b
self.sumatotal.set(suma)
i.set(str(a*b))
# Build the main-menu widgets. NOTE(review): the "Cerrar caja" and "Salir"
# buttons have no command bound yet.
def crea_widgets(self):
tk.Label(self,text="Bienvenido!",bg="ivory2", font=("Verdana",24)).place(x=10,y=10,width=580, height=50)
tk.Label(self, text="¿Que accion desea realizar?").place(x=210,y=70,width=200, height=30)
tk.Button(self,text="Ir al catalogo", command=self.catalogo).place(x=100,y=200,width=120, height=30)
tk.Button(self,text="Generar boleta", command=self.Caja).place(x=250,y=200,width=120, height=30)
tk.Button(self,text="Cerrar caja").place(x=400,y=200,width=120, height=30)
tk.Button(self,text="Salir",bg="red4").place(x=400,y=300,width=120, height=30)
# Open the catalogue window (delegates to the catalogo module).
def catalogo(self):
catalogo.Ventana_catalogo()
# Script entry point: create the Tk root, attach the main window, and run
# the event loop.
if __name__ == "__main__":
root = tk.Tk()
Ventana_Principal(root)
root.mainloop()
3403308 | <filename>platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/sol/calculators/calc_fpll.py
from pyradioconfig.calculator_model_framework.Utils.LogMgr import LogMgr
from pyradioconfig.parts.ocelot.calculators.calc_fpll import calc_fpll_ocelot
from pycalcmodel.core.variable import ModelVariableFormat, CreateModelVariableEnum
from pyradioconfig.calculator_model_framework.interfaces.itarget import ITarget
from enum import Enum
class calc_fpll_sol(calc_fpll_ocelot):
# Register the Sol-specific RFFPLL calculator variables on top of the
# Ocelot base set: the X/Y/N divider values, their post-divider
# frequencies, the DAC frequency, and the fpll_band selection enum.
def buildVariables(self, model):
#Build variables from Ocelot
super().buildVariables(model)
#Add calculator model variables
self._addModelVariable(model, 'fpll_divx', int, ModelVariableFormat.DECIMAL, desc='RFFPLL X divider')
self._addModelVariable(model, 'fpll_divy', int, ModelVariableFormat.DECIMAL, desc='RFFPLL Y divider')
self._addModelVariable(model, 'fpll_divn', int, ModelVariableFormat.DECIMAL, desc='RFFPLL N divider')
self._addModelVariable(model, 'fpll_div_array', int, ModelVariableFormat.DECIMAL, desc='RFFPLL divider array [divx,divy,divn]', is_array=True)
self._addModelVariable(model, 'fpll_divx_freq', float, ModelVariableFormat.DECIMAL, units='Hz', desc='RFFPLL frequency after DIVX')
self._addModelVariable(model, 'fpll_divy_freq', float, ModelVariableFormat.DECIMAL, units='Hz', desc='RFFPLL frequency after DIVY')
self._addModelVariable(model, 'dac_freq_actual', float, ModelVariableFormat.DECIMAL, units='Hz', desc='DAC frequency')
self._addModelVariable(model, 'fpll_band', Enum , ModelVariableFormat.DECIMAL, desc='Used to optimize the modem and system clock rates based on the RF band selected. The same selection must be made for all PHYs in a configuration.')
# Enum of supported RF planning bands; member values 0-6 order the bands
# from highest to lowest frequency range.
model.vars.fpll_band.var_enum = CreateModelVariableEnum(
enum_name='FpllBandEnum',
enum_desc='RF Frequency Planning Band Selection',
member_data=[
['BAND_928', 0, '928 to 960 MHz'],
['BAND_9xx', 1, '902 to 928 MHz'],
['BAND_896', 2, '896 to 901 MHz'],
['BAND_863', 3, '863 to 870 MHz'],
['BAND_780', 4, '779 to 787 MHz'],
['BAND_470', 5, '470 to 510 MHz'],
['BAND_450', 6, 'Below 470 MHz'],
])
def calc_fpll_band(self, model):
rf_freq = model.vars.base_frequency_hz.value
fpll_band_enum = model.vars.fpll_band.var_enum
if (rf_freq < 470e6):
fpll_band = fpll_band_enum.BAND_450
elif (rf_freq < 600e6):
fpll_band = fpll_band_enum.BAND_470
elif (rf_freq < 800e6):
fpll_band = fpll_band_enum.BAND_780
elif (rf_freq < 880e6 ):
fpll_band = fpll_band_enum.BAND_863
elif (rf_freq < 902e6):
fpll_band = fpll_band_enum.BAND_896
elif (rf_freq < 928e6 ):
fpll_band = fpll_band_enum.BAND_9xx
else:
fpll_band = fpll_band_enum.BAND_928
# Write the model var
model.vars.fpll_band.value = fpll_band
def calc_fpll_dividers(self, model):
xtal_frequency_hz = model.vars.xtal_frequency_hz.value
fpll_band = model.vars.fpll_band.value
fpll_band_enum = model.vars.fpll_band.var_enum
if xtal_frequency_hz == 38e6:
if fpll_band == fpll_band_enum.BAND_450:
divx = 7
divn = 118
divy = 23
elif fpll_band == fpll_band_enum.BAND_470:
divx = 6
divn = 98
divy = 19
elif fpll_band == fpll_band_enum.BAND_780:
divx = 7
divn = 118
divy = 23
elif fpll_band == fpll_band_enum.BAND_863:
divx = 6
divn = 101
divy = 20
elif fpll_band == fpll_band_enum.BAND_896:
divx = 7
divn = 118
divy = 23
elif fpll_band == fpll_band_enum.BAND_9xx:
divx = 6
divn = 103
divy = 20
else: #BAND_928
divx = 7
divn = 118
divy = 23
elif xtal_frequency_hz == 38.4e6:
if fpll_band == fpll_band_enum.BAND_450:
divx = 6
divn = 100
divy = 20
elif fpll_band == fpll_band_enum.BAND_470:
divx = 6
divn = 97
divy = 19
elif fpll_band == fpll_band_enum.BAND_780:
divx = 6
divn = 100
divy = 20
elif fpll_band == fpll_band_enum.BAND_863:
divx = 6
divn = 100
divy = 20
elif fpll_band == fpll_band_enum.BAND_896:
divx = 6
divn = 100
divy = 20
elif fpll_band == fpll_band_enum.BAND_9xx:
divx = 5
divn = 85
divy = 17
else: #BAND_928
divx = 7
divn = 117
divy = 23
elif xtal_frequency_hz == 39e6:
if fpll_band == fpll_band_enum.BAND_450:
divx = 7
divn = 115
divy = 23
elif fpll_band == fpll_band_enum.BAND_470:
divx = 7
divn = 111
divy = 22
elif fpll_band == fpll_band_enum.BAND_780:
divx = 7
divn = 115
divy = 23
elif fpll_band == fpll_band_enum.BAND_863:
divx = 6
divn = 98
divy = 20
elif fpll_band == fpll_band_enum.BAND_896:
divx = 7
divn = 115
divy = 23
elif fpll_band == fpll_band_enum.BAND_9xx:
divx = 6
divn = 100
divy = 20
else: #BAND_928
divx = 6
divn = 99
divy = 20
elif xtal_frequency_hz == 40e6:
if fpll_band == fpll_band_enum.BAND_450:
divx = 5
divn = 80
divy = 16
elif fpll_band == fpll_band_enum.BAND_470:
divx = 6
divn = 93
divy = 19
elif fpll_band == fpll_band_enum.BAND_780:
divx = 5
divn = 80
divy = 16
elif fpll_band == fpll_band_enum.BAND_863:
divx = 5
divn = 80
divy = 16
elif fpll_band == fpll_band_enum.BAND_896:
divx = 5
divn = 80
divy = 16
elif fpll_band == fpll_band_enum.BAND_9xx:
divx = 5
divn = 81
divy = 17
else: #BAND_928
divx = 5
divn = 80
divy = 16
else:
LogMgr.Warning("Unsupported xtal frequency, assuming modem clock equal to HFXO unless explicitly set")
divx = 5
divn = 80
divy = 16
# Write the model vars
model.vars.fpll_divx.value = divx
model.vars.fpll_divy.value = divy
model.vars.fpll_divn.value = divn
def calc_fpll_div_array(self, model):
fpll_divx = model.vars.fpll_divx.value
fpll_divy = model.vars.fpll_divy.value
fpll_divn = model.vars.fpll_divn.value
model.vars.fpll_div_array.value = [fpll_divx, fpll_divy, fpll_divn]
def calc_fpll_output_freq(self, model):
xtal_frequency_hz = model.vars.xtal_frequency_hz.value
fpll_divn = model.vars.fpll_divn.value
fpll_divx = model.vars.fpll_divx.value
fpll_divy = model.vars.fpll_divy.value
divr = 2 #Assumption
# Calculate the VCO frequency given PLL settings
fvco = xtal_frequency_hz * fpll_divn / divr # Pull range 1.6 - 2.25GHz
# Calculate the output rates
fpll_divx_freq = fvco / fpll_divx
fpll_divy_freq = fvco / fpll_divy
#Write the model vars
model.vars.fpll_divx_freq.value = fpll_divx_freq
model.vars.fpll_divy_freq.value = fpll_divy_freq
def calc_modem_frequency(self, model):
fpll_divx_freq = model.vars.fpll_divx_freq.value
divxmodemsel = 2 # Assumption
modem_frequency_hz = fpll_divx_freq / divxmodemsel / 4.0
model.vars.modem_frequency_hz.value = modem_frequency_hz
def calc_adc_freq_actual(self, model):
#Read in model variables
fpll_divx_freq = model.vars.fpll_divx_freq.value
divxadcsel = model.vars.RFFPLL0_RFFPLLCTRL1_DIVXADCSEL.value + 1
xtal_frequency_hz = model.vars.xtal_frequency_hz.value
rx_ifadc_en_xo_bypass = model.vars.RAC_IFADCTRIM1_IFADCENXOBYPASS.value
fadc_target = model.vars.adc_target_freq.value
fsynth = model.vars.rx_synth_freq_actual.value
adc_vco_div_actual = model.vars.adc_vco_div_actual.value
adc_clock_mode_actual = model.vars.adc_clock_mode_actual.value
# Calculate ADC rate based on xtal PLL settings
fadc = fpll_divx_freq / divxadcsel # ADC clock frequency
# If using XO bypass, then calculate ADC rate based on xtal only
if adc_clock_mode_actual == model.vars.adc_clock_mode.var_enum.HFXOMULT:
if 1 == rx_ifadc_en_xo_bypass:
adc_freq_actual = xtal_frequency_hz
else:
adc_freq_actual = int(fadc)
else:
adc_freq_actual = int(fsynth / adc_vco_div_actual)
# Compute the final ADC frequency percent error
ferror = 100 * (fadc_target - adc_freq_actual) / float(fadc_target)
# Load local variables back into model variables
model.vars.adc_freq_actual.value = adc_freq_actual
model.vars.adc_freq_error.value = ferror
return
def calc_dac_freq_actual(self, model):
# Read in model variables
fpll_divx_freq = model.vars.fpll_divx_freq.value
divxdacsel = 8
# Calculate the DAC frequency given PLL settings
fdac = fpll_divx_freq / divxdacsel # DAC clock frequency
# Write model variables
model.vars.dac_freq_actual.value = fdac
def calc_fpll_sim_regs(self, model):
fpll_divx = model.vars.fpll_divx.value
fpll_divy = model.vars.fpll_divy.value
fpll_divn = model.vars.fpll_divn.value
#Only write these in the case of Sim target
if model.target == ITarget.SIM_str:
self._reg_write(model.vars.RFFPLL0_RFFPLLCTRL1_DIVX, fpll_divx)
self._reg_write(model.vars.RFFPLL0_RFFPLLCTRL1_DIVY, fpll_divy)
self._reg_write(model.vars.RFFPLL0_RFFPLLCTRL1_DIVN, fpll_divn)
self._reg_write(model.vars.RFFPLL0_RFFPLLCTRL1_DIVXMODEMSEL, 1) #Corresponds to divider of 2
self._reg_write(model.vars.RFFPLL0_RFFPLLCTRL1_DIVXDACSEL, 7) # Corresponds to divider of 8
| StarcoderdataPython |
6577120 | <filename>mevis/_internal/args.py
from collections.abc import Iterable as _Iterable
def check_arg(value, name, allowed_types=None, allowed_values=None, allow_none=False):
    """Check if a user-provided argument has a valid type and value.

    Parameters
    ----------
    value : any type
        Value that a user provided for an argument.
    name : str
        Name of the argument, used for proper Exception messages.
    allowed_types : type or iterable of types, optional
    allowed_values : list of values of any type, optional
    allow_none : bool, optional
        If True, ``value`` can be ``None`` and then no type or value checks
        are performed on it.

    Raises
    ------
    TypeError
        If the type of the given value is not contained in ``allowed_types``.
    ValueError
        If the given value is not contained in ``allowed_values``.
    """
    # None short-circuits all checks when explicitly allowed.
    if allow_none and value is None:
        return
    # Check type
    if allowed_types is not None:
        # Normalize to a list so a single type and any iterable of types are
        # both accepted. Bug fix: the original passed a list straight to
        # isinstance(), which raises "isinstance() arg 2 must be a type or
        # tuple of types" -- isinstance() only accepts a type or a *tuple*.
        if not isinstance(allowed_types, _Iterable):
            allowed_types = [allowed_types]
        else:
            allowed_types = list(allowed_types)
        if not isinstance(value, tuple(allowed_types)):
            type_name = value.__class__.__name__
            allowed_type_names = ', '.join(t.__name__ for t in allowed_types)
            message = 'Argument "{}" has a wrong type: {}\n\nAllowed types: {}'.format(
                name, type_name, allowed_type_names)
            raise TypeError(message)
    # Check value
    if allowed_values is not None and value not in allowed_values:
        allowed_value_names = ', '.join(str(v) for v in allowed_values)
        message = 'Argument "{}" has a wrong value: {}\n\nAllowed values: {}'.format(
            name, value, allowed_value_names)
        raise ValueError(message)
| StarcoderdataPython |
4802166 | <reponame>openeuler-mirror/radiaTest
import json
from flask import g
from flask_restful import Resource
from flask_pydantic import validate
from server import socketio, casbin_enforcer
from server.utils.auth_util import auth
from server.utils.response_util import response_collect
from server.schema.task import *
from .handlers import HandlerTaskStatus, HandlerTask, HandlerTaskParticipant, HandlerTaskComment, HandlerTaskTag
from .handlers import HandlerTaskFamily, HandlerTaskCase, HandlerTaskReport
from .handlers import HandlerTaskMilestone, HandlerTaskStatistics
from .handlers import HandlerTaskExecute, HandlerCaseTask, HandlerCaseFrame
from .template_handler import HandlerTemplate, HandlerTemplateType, HandlerTaskDistributeCass
class Status(Resource):
    """REST resource for task statuses: list, create, update and delete."""
    @auth.login_required()
    @response_collect
    def get(self):
        """List all task statuses."""
        return HandlerTaskStatus.get()
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def post(self, body: AddTaskStatusSchema):
        """Create a new task status."""
        return HandlerTaskStatus.add(body)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def put(self, status_id, body: UpdateTaskStatusSchema):
        """Update the status identified by ``status_id``."""
        return HandlerTaskStatus.update(status_id, body)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def delete(self, status_id):
        """Delete the status identified by ``status_id``."""
        return HandlerTaskStatus.delete(status_id)
class StatusOrder(Resource):
    """REST resource that reorders task statuses."""
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def put(self, body: UpdateTaskStatusOrderSchema):
        """Apply a new display order to the task statuses."""
        return HandlerTaskStatus.update_order(body)
class Task(Resource):
    """REST resource for the task collection: query visible tasks, create a task."""
    @auth.login_required()
    @response_collect
    @validate()
    def get(self, query: QueryTaskSchema):
        """List tasks visible to the current (gitee) user."""
        return HandlerTask.get_all(g.gitee_id, query)
    @auth.login_required()
    @response_collect
    @validate()
    def post(self, body: AddTaskSchema):
        """Create a new task."""
        return HandlerTask.create(body)
class TaskItem(Resource):
    """REST resource for a single task: fetch, delete or update by id."""
    @auth.login_required()
    @response_collect
    @casbin_enforcer.enforcer
    def get(self, task_id: int):
        return HandlerTask.get(task_id)
    @auth.login_required()
    @response_collect
    @casbin_enforcer.enforcer
    def delete(self, task_id):
        return HandlerTask.delete(task_id)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def put(self, task_id, body: UpdateTaskSchema):
        return HandlerTask.update(task_id, body)
class ParticipantItem(Resource):
    """REST resource for the participants of a single task."""
    @auth.login_required()
    @response_collect
    @casbin_enforcer.enforcer
    def get(self, task_id):
        """List the participants of the task."""
        return HandlerTaskParticipant.get(task_id)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def put(self, task_id, body: UpdateTaskParticipantSchema):
        """Replace the participants of the task."""
        return HandlerTaskParticipant.update(task_id, body)
class Participants(Resource):
    """REST resource listing participants across tasks (no task filter)."""
    @auth.login_required()
    @response_collect
    @casbin_enforcer.enforcer
    def get(self):
        return HandlerTaskParticipant.get(None, query_task=True)
class Comment(Resource):
    """REST resource for comments on a task."""
    @auth.login_required()
    @response_collect
    @casbin_enforcer.enforcer
    def get(self, task_id):
        return HandlerTaskComment.get(task_id)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def delete(self, task_id, body: DelTaskCommentSchema):
        return HandlerTaskComment.delete(task_id, body)
    @auth.login_required()
    @response_collect
    @validate()
    def post(self, task_id, body: AddTaskCommentSchema):
        return HandlerTaskComment.add(task_id, body)
class RecycleBin(Resource):
    """REST resource paging through soft-deleted tasks."""
    @auth.login_required()
    @response_collect
    @validate()
    def get(self, query: PageBaseSchema):
        return HandlerTask.get_recycle_bin(query)
class Tag(Resource):
    """REST resource for task tags: list, create, delete."""
    @auth.login_required()
    @response_collect
    def get(self):
        return HandlerTaskTag.get()
    @auth.login_required()
    @response_collect
    @validate()
    def post(self, body: AddTaskTagSchema):
        return HandlerTaskTag.add(body)
    @auth.login_required()
    @response_collect
    @validate()
    def delete(self, body: DelTaskTagSchema):
        return HandlerTaskTag.delete(body)
class FamilyItem(Resource):
    """REST resource for a task's family (related/child task) members."""
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def get(self, task_id, query: QueryFamilySchema):
        return HandlerTaskFamily.get(task_id, query)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def post(self, task_id, body: AddFamilyMemberSchema):
        return HandlerTaskFamily.add(task_id, body)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def delete(self, task_id, body: DelFamilyMemberSchema):
        return HandlerTaskFamily.delete(task_id, body)
class Family(Resource):
    """REST resource listing family members with no task/query filter."""
    @auth.login_required()
    @response_collect
    def get(self):
        return HandlerTaskFamily.get(None, None)
class Report(Resource):
    """REST resource for a task's report: fetch and update its content."""
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def get(self, task_id):
        return HandlerTaskReport.get(task_id)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def put(self, task_id, body: TaskReportContentSchema):
        return HandlerTaskReport.update(task_id, body)
class Cases(Resource):
    """REST resource for the test cases attached to a task (per milestone)."""
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def get(self, task_id, query: QueryTaskCaseSchema):
        """List the cases of the task."""
        return HandlerTaskCase.get(task_id, query)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def post(self, task_id, milestone_id, body: AddTaskCaseSchema):
        """Attach cases to the task under the given milestone."""
        return HandlerTaskCase.add(task_id, milestone_id, body)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def delete(self, task_id, milestone_id, body: DelTaskCaseSchema):
        """Detach cases from the task under the given milestone."""
        return HandlerTaskCase.delete(task_id, milestone_id, body)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def put(self, task_id, milestone_id, body: DistributeTaskCaseSchema):
        """Distribute (assign) the task's cases under the given milestone."""
        return HandlerTaskCase.distribute(task_id, milestone_id, body)
class CasesResult(Resource):
    """REST resource exposing the aggregated case results of a task."""
    @auth.login_required()
    @response_collect
    @casbin_enforcer.enforcer
    def get(self, task_id):
        return HandlerTaskCase.task_cases_result(task_id)
class TaskStatistics(Resource):
    """REST resource returning task statistics for the given query window."""
    @auth.login_required()
    @response_collect
    @validate()
    def get(self, query: QueryTaskStatisticsSchema):
        return HandlerTaskStatistics(query).run()
class TaskMilestones(Resource):
    """Callback endpoint updating a task-milestone's job result.

    NOTE(review): no auth decorators here -- presumably an internal/machine
    callback; confirm this endpoint is protected elsewhere.
    """
    @validate()
    def put(self, taskmilestone_id: int, body: TaskJobResultSchema):
        return HandlerTaskMilestone.update_task_process(taskmilestone_id, body)
class TaskMilestonesCases(Resource):
    """REST resource recording a manual case result for a task-milestone."""
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def put(self, task_id: int, taskmilestone_id: int, case_id: int, body: TaskCaseResultSchema):
        return HandlerTaskMilestone.update_manual_cases_result(task_id, taskmilestone_id, case_id, body)
class TaskExecute(Resource):
    """Endpoint that creates and immediately executes a task.

    NOTE(review): also unauthenticated -- looks like an external trigger;
    confirm access control.
    """
    @validate()
    def post(self, body: OutAddTaskSchema):
        e = HandlerTaskExecute().create(body)
        # create() returns either the executor or an error response; only
        # call execute() on a real executor instance.
        if not isinstance(e, HandlerTaskExecute):
            return e
        return e.execute()
class TaskDistributeTemplate(Resource):
    """REST resource for task distribution templates (CRUD)."""
    @auth.login_required()
    @response_collect
    @validate()
    def get(self, query: DistributeTemplate.Query):
        return HandlerTemplate.get(query)
    @auth.login_required()
    @response_collect
    @validate()
    def post(self, body: DistributeTemplate.Add):
        return HandlerTemplate.add(body)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def put(self, template_id, body: DistributeTemplate.Update):
        return HandlerTemplate.update(template_id, body)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def delete(self, template_id):
        return HandlerTemplate.delete(template_id)
class DistributeType(Resource):
    """REST resource for the per-template distribution types (CRUD)."""
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def get(self, query: DistributeTemplateTypeSchema.Query):
        return HandlerTemplateType.get(query)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def post(self, template_id, body: DistributeTemplateTypeSchema.Add):
        return HandlerTemplateType.add(template_id, body)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def put(self, type_id, body: DistributeTemplateTypeSchema.Update):
        return HandlerTemplateType.update(type_id, body)
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def delete(self, type_id):
        return HandlerTemplateType.delete(type_id)
class DistributeCaseByTemplate(Resource):
    """Endpoint distributing a task's cases according to a template."""
    @auth.login_required()
    @response_collect
    @validate()
    @casbin_enforcer.enforcer
    def put(self, task_id, template_id, body: DistributeTemplate.Distribute):
        return HandlerTaskDistributeCass().distribute(task_id, template_id, body)
class TaskList(Resource):
    """Endpoint for bulk-deleting a list of tasks."""
    @auth.login_required()
    @response_collect
    @validate()
    def put(self, body: DeleteTaskList):
        return HandlerTask.delete_task_list(body)
class CaseTask(Resource):
    """Endpoint returning the tasks a given test case belongs to."""
    @auth.login_required()
    @response_collect
    def get(self, case_id):
        return HandlerCaseTask.get_task_info(case_id)
class TaskFrame(Resource):
    """Endpoint returning the available task (test) frameworks."""
    @auth.login_required()
    @response_collect
    def get(self):
        return HandlerCaseFrame.get_task_frame()
class MileStoneTask(Resource):
    """Endpoint listing the tasks associated with a milestone."""
    @auth.login_required()
    @response_collect
    @validate()
    def get(self, milestone_id, query: MilestoneTaskSchema):
        return HandlerTask.get_milestone_tasks(milestone_id, query)
| StarcoderdataPython |
11316803 | from unittest import TestCase
from doctest import DocTestSuite
from cr8 import java_magic
from cr8.java_magic import _parse_java_version
class JavaVersionParsingTest(TestCase):
    """Checks that _parse_java_version handles the banner formats of several JDK eras."""
    def assertVersion(self, line, expected):
        """Assert that parsing *line* yields the *expected* (major, minor, patch) tuple."""
        parsed = _parse_java_version(line)
        self.assertEqual(parsed, expected)
    def test_java_8_line(self):
        # Legacy "1.8.0_NNN" scheme
        self.assertVersion('openjdk version "1.8.0_202"', (8, 0, 202))
    def test_java_10_line(self):
        # Post-JEP-223 scheme with release date
        self.assertVersion('openjdk version "10.0.2" 2018-07-17', (10, 0, 2))
    def test_java_11_line(self):
        # Oracle "java" banner with LTS suffix
        self.assertVersion('java 11.0.1 2018-10-16 LTS', (11, 0, 1))
    def test_java_12_line(self):
        # Major-only version string
        self.assertVersion('openjdk version "12" 2019-03-19', (12, 0, 0))
def load_tests(loader, tests, ignore):
    """unittest ``load_tests`` hook: merge java_magic's doctests into the suite."""
    doctest_suite = DocTestSuite(java_magic)
    tests.addTests(doctest_suite)
    return tests
| StarcoderdataPython |
8194784 | <filename>src/ploomber/sources/inspect.py
"""
Extensions for the inspect module
"""
import inspect
def getfile(fn):
    """
    Returns the file where the function is defined. Works even in wrapped
    functions
    """
    target = fn
    # Decorators built with functools.wraps expose the original callable on
    # ``__wrapped__``; walk that chain down to the innermost function.
    while hasattr(target, '__wrapped__'):
        target = target.__wrapped__
    return inspect.getfile(target)
| StarcoderdataPython |
12864307 | import os
from xappt_qt.__version__ import __version__, __build__
from xappt_qt.plugins.interfaces.qt import QtInterface
# suppress "qt.qpa.xcb: QXcbConnection: XCB error: 3 (BadWindow)"
os.environ['QT_LOGGING_RULES'] = '*.debug=false;qt.qpa.*=false'
# Version as a tuple of ints with the build number appended,
# e.g. "1.2.3" + build 4 -> (1, 2, 3, 4).
version = tuple(map(int, __version__.split('.'))) + (__build__, )
# Human-readable "<version>-<build>" string.
version_str = f"{__version__}-{__build__}"
# Initialized to None here; presumably assigned by application startup
# code elsewhere -- TODO confirm.
executable = None
| StarcoderdataPython |
5021372 | <reponame>Chkoda/Deep-Learning-Model-Evaluation
from __future__ import print_function
import sys
import os
import math
from optparse import OptionParser
from keras.models import load_model, Model
from argparse import ArgumentParser
from keras import backend as K
import numpy as np
import h5py
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.metrics import roc_curve, auc
import pandas as pd
from keras.utils.conv_utils import convert_kernel
import tensorflow as tf
from constraints import ZeroSomeWeights
from train import print_model_to_json
from keras.utils.generic_utils import get_custom_objects
get_custom_objects().update({"ZeroSomeWeights": ZeroSomeWeights})
# To turn off GPU
#os.environ['CUDA_VISIBLE_DEVICES'] = ''
def getWeightArray(model):
    """Collect absolute weight magnitudes from all Dense/Conv1D/LSTM layers.

    For every kernel tensor the per-weight |w| is recorded both normalised by
    that tensor's maximum absolute value ("relative") and un-normalised.
    Bias vectors (rank-1 tensors) are skipped.

    Returns a 4-tuple:
        (np.array of all relative |w|,
         dict layer name -> np.array of relative |w|,
         np.array of all absolute |w|,
         dict layer name -> np.array of absolute |w|)
    """
    allWeights = []
    allWeightsNonRel = []
    allWeightsByLayer = {}
    allWeightsByLayerNonRel = {}
    for layer in model.layers:
        if layer.__class__.__name__ in ['Dense', 'Conv1D', 'LSTM']:
            original_w = layer.get_weights()
            weightsByLayer = []
            weightsByLayerNonRel = []
            for my_weights in original_w:
                if len(my_weights.shape) < 2: # bias term, ignore for now
                    continue
                #l1norm = tf.norm(my_weights,ord=1)
                elif len(my_weights.shape) == 2: # Dense or LSTM
                    # Reduce twice along the last axis to get a scalar max(|w|)
                    tensor_abs = tf.abs(my_weights)
                    tensor_reduce_max_1 = tf.reduce_max(tensor_abs,axis=-1)
                    tensor_reduce_max_2 = tf.reduce_max(tensor_reduce_max_1,axis=-1)
                elif len(my_weights.shape) == 3: # Conv1D
                    # (filter_width, n_inputs, n_filters)
                    tensor_abs = tf.abs(my_weights)
                    tensor_reduce_max_0 = tf.reduce_max(tensor_abs,axis=-1)
                    tensor_reduce_max_1 = tf.reduce_max(tensor_reduce_max_0,axis=-1)
                    tensor_reduce_max_2 = tf.reduce_max(tensor_reduce_max_1,axis=-1)
                # TF1 graph mode: a Session is opened per kernel tensor to
                # evaluate the max -- slow, but keeps the code simple.
                with tf.Session():
                    #l1norm_val = float(l1norm.eval())
                    tensor_max = float(tensor_reduce_max_2.eval())
                it = np.nditer(my_weights, flags=['multi_index'], op_flags=['readwrite'])
                while not it.finished:
                    w = it[0]
                    allWeights.append(abs(w)/tensor_max)
                    allWeightsNonRel.append(abs(w))
                    weightsByLayer.append(abs(w)/tensor_max)
                    weightsByLayerNonRel.append(abs(w))
                    it.iternext()
            if len(weightsByLayer)>0:
                allWeightsByLayer[layer.name] = np.array(weightsByLayer)
                allWeightsByLayerNonRel[layer.name] = np.array(weightsByLayerNonRel)
    return np.array(allWeights), allWeightsByLayer, np.array(allWeightsNonRel), allWeightsByLayerNonRel
if __name__ == "__main__":
    # CLI entry point: prune small relative weights from a trained Keras model,
    # save the pruned model/weights/mask, and plot weight-magnitude histograms.
    parser = OptionParser()
    parser.add_option('-m','--model' ,action='store',type='string',dest='inputModel' ,default='train_simple/KERAS_check_best_model.h5', help='input model')
    parser.add_option('--relative-weight-max' ,action='store',type='float',dest='relative_weight_max' ,default=None, help='max relative weight')
    parser.add_option('--relative-weight-percentile' ,action='store',type='float',dest='relative_weight_percentile' ,default=None, help='relative weight percentile')
    parser.add_option('-o','--outputModel' ,action='store',type='string',dest='outputModel' ,default='prune_simple/pruned_model.h5', help='output directory')
    (options,args) = parser.parse_args()
    from models import three_layer_model
    from keras.layers import Input
    model = load_model(options.inputModel, custom_objects={'ZeroSomeWeights':ZeroSomeWeights})
    model.load_weights(options.inputModel)
    weightsPerLayer = {}
    droppedPerLayer = {}
    # Per-layer 0/1 mask: 0 marks a pruned weight position.
    binaryTensorPerLayer = {}
    allWeightsArray,allWeightsByLayer,allWeightsArrayNonRel,allWeightsByLayerNonRel = getWeightArray(model)
    # Pruning threshold: either derived from a percentile of all relative
    # weights, or given directly; exactly one of the two options is required.
    if options.relative_weight_percentile is not None:
        relative_weight_max = np.percentile(allWeightsArray,options.relative_weight_percentile,axis=-1)
    elif options.relative_weight_max is not None:
        relative_weight_max = options.relative_weight_max
    else:
        print('Need to set pruning criteria')
        sys.exit()
    # Zero out every weight whose |w| / max(|w| in its tensor) is below the
    # threshold, recording positions in droppedPerLayer / binaryTensorPerLayer.
    for layer in model.layers:
        droppedPerLayer[layer.name] = []
        if layer.__class__.__name__ in ['Dense', 'Conv1D', 'LSTM']:
            original_w = layer.get_weights()
            weightsPerLayer[layer.name] = original_w
            for my_weights in original_w:
                if len(my_weights.shape) < 2: # bias term, skip for now
                    continue
                #l1norm = tf.norm(my_weights,ord=1)
                elif len(my_weights.shape) == 2: # Dense
                    tensor_abs = tf.abs(my_weights)
                    tensor_reduce_max_1 = tf.reduce_max(tensor_abs,axis=-1)
                    tensor_reduce_max_2 = tf.reduce_max(tensor_reduce_max_1,axis=-1)
                elif len(my_weights.shape) == 3: # Conv1D
                    tensor_abs = tf.abs(my_weights)
                    tensor_reduce_max_0 = tf.reduce_max(tensor_abs,axis=-1)
                    tensor_reduce_max_1 = tf.reduce_max(tensor_reduce_max_0,axis=-1)
                    tensor_reduce_max_2 = tf.reduce_max(tensor_reduce_max_1,axis=-1)
                with tf.Session():
                    #l1norm_val = float(l1norm.eval())
                    tensor_max = float(tensor_reduce_max_2.eval())
                it = np.nditer(my_weights, flags=['multi_index'], op_flags=['readwrite'])
                binaryTensorPerLayer[layer.name] = np.ones(my_weights.shape)
                while not it.finished:
                    w = it[0]
                    if abs(w)/tensor_max < relative_weight_max:
                        #print("small relative weight %e/%e = %e -> 0"%(abs(w), tensor_max, abs(w)/tensor_max))
                        w[...] = 0
                        droppedPerLayer[layer.name].append((it.multi_index, abs(w)))
                        binaryTensorPerLayer[layer.name][it.multi_index] = 0
                    it.iternext()
            #print('%i weights dropped from %s out of %i weights'%(len(droppedPerLayer[layer.name]),layer.name,layer.count_params()))
            #converted_w = convert_kernel(original_w)
            converted_w = original_w
            # Write the (now partially zeroed) weights back into the layer.
            layer.set_weights(converted_w)
    print('Summary:')
    totalDropped = sum([len(droppedPerLayer[layer.name]) for layer in model.layers])
    for layer in model.layers:
        print('%i weights dropped from %s out of %i weights'%(len(droppedPerLayer[layer.name]),layer.name, layer.count_params()))
    print('%i total weights dropped out of %i total weights'%(totalDropped,model.count_params()))
    print('%.1f%% compression'%(100.*totalDropped/model.count_params()))
    model.save(options.outputModel)
    model.save_weights(options.outputModel.replace('.h5','_weights.h5'))
    print_model_to_json(model, options.outputModel.replace('.h5','.json'))
    # save binary tensor in h5 file
    h5f = h5py.File(options.outputModel.replace('.h5','_drop_weights.h5'),'w')
    for layer, binary_tensor in binaryTensorPerLayer.items():
        h5f.create_dataset('%s'%layer, data = binaryTensorPerLayer[layer])
    h5f.close()
    # plot the distribution of weights
    # Recover the percentile corresponding to the applied threshold so it can
    # be drawn alongside the fixed 5%/95% guide lines.
    if options.relative_weight_percentile is not None:
        your_percentile = options.relative_weight_percentile
    else:
        your_percentile = stats.percentileofscore(allWeightsArray, relative_weight_max)
    #percentiles = [5,16,50,84,95,your_percentile]
    percentiles = [5,95,your_percentile]
    #colors = ['r','r','r','r','r','g']
    colors = ['r','r','g']
    vlines = np.percentile(allWeightsArray,percentiles,axis=-1)
    xmin = np.amin(allWeightsArray[np.nonzero(allWeightsArray)])
    xmax = np.amax(allWeightsArray)
    # NOTE(review): the computed xmin/xmax are immediately overridden by
    # hard-coded plot bounds -- confirm intentional.
    xmin = 6e-8
    xmax = 1
    bins = np.linspace(xmin, xmax, 50)
    logbins = np.geomspace(xmin, xmax, 50)
    labels = []
    histos = []
    for key in reversed(sorted(allWeightsByLayer.keys())):
        labels.append(key)
        histos.append(allWeightsByLayer[key])
    # Linear-x histogram of relative |w| per layer
    plt.figure()
    #plt.hist(allWeightsArray,bins=bins)
    #plt.hist(allWeightsByLayer.values(),bins=bins,histtype='bar',stacked=True,label=allWeightsByLayer.keys())
    plt.hist(histos,bins=bins,histtype='step',stacked=False,label=labels)
    plt.legend(prop={'size':10}, frameon=False)
    axis = plt.gca()
    ymin, ymax = axis.get_ylim()
    for vline, percentile, color in zip(vlines, percentiles, colors):
        if percentile==0: continue
        if vline < xmin: continue
        plt.axvline(vline, 0, 1, color=color, linestyle='dashed', linewidth=1, label = '%s%%'%percentile)
        plt.text(vline, ymax+0.01*(ymax-ymin), '%s%%'%percentile, color=color, horizontalalignment='center')
    plt.ylabel('Number of Weights')
    plt.xlabel('Absolute Relative Weights')
    plt.savefig(options.outputModel.replace('.h5','_weight_histogram.pdf'))
    # Log-x histogram of relative |w| per layer
    plt.figure()
    #plt.hist(allWeightsArray,bins=logbins)
    #plt.hist(allWeightsByLayer.values(),bins=logbins,histtype='bar',stacked=True,label=allWeightsByLayer.keys())
    plt.hist(histos,bins=logbins,histtype='step',stacked=False,label=labels)
    plt.semilogx()
    plt.legend(prop={'size':10}, frameon=False)
    axis = plt.gca()
    ymin, ymax = axis.get_ylim()
    for vline, percentile, color in zip(vlines, percentiles, colors):
        if percentile==0: continue
        if vline < xmin: continue
        xAdd = 0
        yAdd = 0
        #if plotPercentile5 and percentile==84:
        #    xAdd=0.2
        #if plotPercentile16 and percentile==95:
        #    xAdd=1.2
        plt.axvline(vline, 0, 1, color=color, linestyle='dashed', linewidth=1, label = '%s%%'%percentile)
        plt.text(vline+xAdd, ymax+0.01*(ymax-ymin)+yAdd, '%s%%'%percentile, color=color, horizontalalignment='center')
    plt.ylabel('Number of Weights')
    plt.xlabel('Absolute Relative Weights')
    plt.figtext(0.25, 0.90,'hls4ml',fontweight='bold', wrap=True, horizontalalignment='right', fontsize=14)
    #plt.figtext(0.35, 0.90,'preliminary', style='italic', wrap=True, horizontalalignment='center', fontsize=14)
    plt.savefig(options.outputModel.replace('.h5','_weight_histogram_logx.pdf'))
    # Log-x histogram of the *un-normalised* |w| per layer
    labels = []
    histos = []
    for key in reversed(sorted(allWeightsByLayerNonRel.keys())):
        labels.append(key)
        histos.append(allWeightsByLayerNonRel[key])
    xmin = np.amin(allWeightsArrayNonRel[np.nonzero(allWeightsArrayNonRel)])
    xmax = np.amax(allWeightsArrayNonRel)
    #bins = np.linspace(xmin, xmax, 100)
    bins = np.geomspace(xmin, xmax, 50)
    plt.figure()
    #plt.hist(allWeightsArrayNonRel,bins=bins)
    #plt.hist(allWeightsByLayerNonRel.values(),bins=bins,histtype='bar',stacked=True,label=allWeightsByLayer.keys())
    plt.hist(histos,bins=bins,histtype='step',stacked=False,label=labels)
    plt.semilogx(basex=2)
    plt.legend(prop={'size':10}, frameon=False, loc='upper left')
    plt.ylabel('Number of Weights')
    plt.xlabel('Absolute Value of Weights')
    plt.figtext(0.25, 0.90,'hls4ml',fontweight='bold', wrap=True, horizontalalignment='right', fontsize=14)
    #plt.figtext(0.35, 0.90,'preliminary', style='italic', wrap=True, horizontalalignment='center', fontsize=14)
    plt.savefig(options.outputModel.replace('.h5','_weight_nonrel_histogram_logx.pdf'))
| StarcoderdataPython |
11321447 | <reponame>uktrade/jupyterhub-data-auth-admin
import psycopg2
import pytest
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.test import Client, TestCase, override_settings
from dataworkspace.apps.core.utils import database_dsn
from dataworkspace.tests import factories
from dataworkspace.cel import celery_app
@pytest.fixture
def staff_user(db):
    """Superuser staff account with a fixed SSO profile id, created per test."""
    staff_user = get_user_model().objects.create(
        username="<EMAIL>",
        email="<EMAIL>",
        is_staff=True,
        is_superuser=True,
        first_name="Bob",
    )
    staff_user.profile.sso_id = "aae8901a-082f-4f12-8c6c-fdf4aeba2d68"
    staff_user.profile.save()
    return staff_user
def get_staff_user_data(db, staff_user):
    """Build the SSO header dict a Django test client needs to act as *staff_user*.

    ``db`` is accepted (and unused) so the helper mirrors the fixture signature.
    """
    email = staff_user.email
    headers = {
        "HTTP_SSO_PROFILE_EMAIL": email,
        "HTTP_SSO_PROFILE_CONTACT_EMAIL": email,
        "HTTP_SSO_PROFILE_RELATED_EMAILS": "",
        "HTTP_SSO_PROFILE_USER_ID": staff_user.profile.sso_id,
        "HTTP_SSO_PROFILE_LAST_NAME": "Testerson",
        "HTTP_SSO_PROFILE_FIRST_NAME": "Bob",
    }
    return headers
@pytest.fixture
def staff_user_data(db, staff_user):
    """SSO request headers for the staff user, as a fixture."""
    return get_staff_user_data(db, staff_user)
def get_staff_client(staff_user_data):
    """Django test client pre-loaded with the staff user's SSO headers."""
    return Client(**staff_user_data)
@pytest.fixture
def staff_client(staff_user_data):
    """Fixture wrapper around get_staff_client."""
    return get_staff_client(staff_user_data)
@pytest.fixture
def user(db):
    """Plain (non-staff, non-superuser) account, created per test."""
    user = get_user_model().objects.create(
        username="<EMAIL>",
        is_staff=False,
        is_superuser=False,
        email="<EMAIL>",
        first_name="Frank",
    )
    return user
@pytest.fixture
def user_data(db, user):
    """SSO request headers for the plain user."""
    return {
        "HTTP_SSO_PROFILE_EMAIL": user.email,
        "HTTP_SSO_PROFILE_CONTACT_EMAIL": user.email,
        "HTTP_SSO_PROFILE_RELATED_EMAILS": "",
        "HTTP_SSO_PROFILE_USER_ID": "aae8901a-082f-4f12-8c6c-fdf4aeba2d69",
        "HTTP_SSO_PROFILE_LAST_NAME": "Exampleson",
        "HTTP_SSO_PROFILE_FIRST_NAME": "Frank",
    }
@pytest.fixture
def client(user_data):
    """Django test client authenticated as the plain user via SSO headers."""
    return Client(**user_data)
@pytest.fixture
def sme_user(db):
    """Staff account placed in the "Subject Matter Experts" group."""
    sme_group = Group.objects.get(name="Subject Matter Experts")
    user = get_user_model().objects.create(
        username="<EMAIL>",
        email="<EMAIL>",
        is_staff=True,
        is_superuser=False,
    )
    sme_group.user_set.add(user)
    sme_group.save()
    return user
@pytest.fixture
def sme_user_data(db, sme_user):
    """SSO request headers for the SME user."""
    return {
        "HTTP_SSO_PROFILE_EMAIL": sme_user.email,
        "HTTP_SSO_PROFILE_CONTACT_EMAIL": sme_user.email,
        "HTTP_SSO_PROFILE_RELATED_EMAILS": "",
        "HTTP_SSO_PROFILE_USER_ID": "aae8901a-082f-4f12-8c6c-fdf4aeba2d70",
        "HTTP_SSO_PROFILE_LAST_NAME": "Sampledóttir",
        "HTTP_SSO_PROFILE_FIRST_NAME": "Jane",
    }
@pytest.fixture
def sme_client(sme_user, sme_user_data):
    """Django test client logged in as the SME user (session + SSO headers)."""
    client = Client(**sme_user_data)
    client.force_login(sme_user)
    return client
@pytest.fixture
def unauthenticated_client():
    """Django test client with no auth headers at all."""
    return Client()
@pytest.fixture
def request_client(request):
    """
    Allows for passing a fixture name to parameterize to return a named fixture
    """
    return request.getfixturevalue(request.param)
@pytest.fixture(scope="session")
def test_case():
    """A bare unittest.TestCase instance, handy for its assert helpers."""
    return TestCase("run")
@pytest.fixture
def metadata_db(db):
    """Create the dataflow.metadata table in 'my_database' and seed four rows.

    Returns the Database factory object for the seeded database.
    """
    database = factories.DatabaseFactory(memorable_name="my_database")
    with psycopg2.connect(
        database_dsn(settings.DATABASES_DATA["my_database"])
    ) as conn, conn.cursor() as cursor:
        # Schema + table are created idempotently and truncated so each test
        # starts from exactly the four seed rows below.
        cursor.execute(
            """
            CREATE SCHEMA IF NOT EXISTS dataflow;
            CREATE TABLE IF NOT EXISTS dataflow.metadata (
                id SERIAL,
                table_schema TEXT,
                table_name TEXT,
                source_data_modified_utc TIMESTAMP WITHOUT TIME ZONE,
                dataflow_swapped_tables_utc TIMESTAMP WITHOUT TIME ZONE,
                table_structure JSONB,
                data_ids TEXT[],
                data_type INTEGER NOT NULL,
                data_hash_v1 TEXT,
                primary_keys TEXT[]
            );
            TRUNCATE TABLE dataflow.metadata;
            INSERT INTO dataflow.metadata (
                table_schema, table_name, source_data_modified_utc, dataflow_swapped_tables_utc, table_structure, data_type
            )
            VALUES
            ('public','table1','2020-09-02 00:01:00.0','2020-09-02 00:01:00.0','{"field1":"int","field2":"varchar"}',1),
            ('public','table2','2020-09-01 00:01:00.0','2020-09-02 00:01:00.0',NULL,1),
            ('public','table1','2020-01-01 00:01:00.0','2020-09-02 00:01:00.0',NULL,1),
            ('public','table4', NULL,'2021-12-01 00:00:00.0',NULL,1);
            """
        )
        conn.commit()
    return database
@pytest.fixture
def test_dataset(db):
    """Create table ``public.foo`` with one row and register it in
    ``dataflow.metadata``.

    Returns the (schema, table) tuple identifying the created table.
    NOTE(review): this connects to the ``my_database`` DSN without creating
    a DatabaseFactory -- it appears to assume the database already exists
    via settings or another fixture; confirm.
    """
    with psycopg2.connect(
        database_dsn(settings.DATABASES_DATA["my_database"])
    ) as conn, conn.cursor() as cursor:
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS foo AS SELECT a,b FROM (VALUES ('test',30)) AS temp_table(a,b);"
        )
        # Register the new table in the dataflow metadata bookkeeping table.
        cursor.execute(
            """
            CREATE SCHEMA IF NOT EXISTS dataflow;
            CREATE TABLE IF NOT EXISTS dataflow.metadata (
                id SERIAL,
                table_schema TEXT,
                table_name TEXT,
                source_data_modified_utc TIMESTAMP WITHOUT TIME ZONE,
                dataflow_swapped_tables_utc TIMESTAMP WITHOUT TIME ZONE,
                table_structure JSONB,
                data_ids TEXT[],
                data_type INTEGER NOT NULL,
                data_hash_v1 TEXT
            );
            TRUNCATE TABLE dataflow.metadata;
            INSERT INTO dataflow.metadata (table_schema, table_name, source_data_modified_utc, table_structure, data_type)
            VALUES
            ('public', 'foo', '2021-01-01 00:00:00.0', '{"a":"text","b":"int"}', 1);
            """
        )
        conn.commit()
    return ("public", "foo")
@pytest.fixture(autouse=True, scope="session")
def change_staticfiles_storage():
    """
    Slightly strange, but Django recommends not using the manifest
    staticfiles storage when testing because it generates the manifest from
    the `collectstatic` command, which isn't run for tests, so staticfile
    lookup will fail:
    https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/#django.contrib.staticfiles.storage.ManifestStaticFilesStorage.manifest_strict
    """
    # autouse + session scope: applies to every test without opting in.
    with override_settings(
        STATICFILES_STORAGE="django.contrib.staticfiles.storage.StaticFilesStorage"
    ):
        yield
@pytest.fixture(scope="session", autouse=True)
def make_celery_eager():
    """Force Celery tasks to run eagerly (synchronously, in-process) for all tests."""
    celery_app.conf.task_always_eager = True
@pytest.fixture
def dataset_db(metadata_db):
    """Create two sample tables and a view in ``my_database``.

    Depends on ``metadata_db`` so the dataflow bookkeeping rows exist first.
    Returns a Database factory object for ``my_database``.
    """
    database = factories.DatabaseFactory(memorable_name="my_database")
    with psycopg2.connect(database_dsn(settings.DATABASES_DATA["my_database"])) as conn:
        conn.cursor().execute(
            """
            CREATE TABLE IF NOT EXISTS dataset_test (
                id INT,
                name VARCHAR(255),
                date DATE
            );
            CREATE TABLE IF NOT EXISTS dataset_test2 (
                id INT,
                name VARCHAR(255)
            );
            CREATE OR REPLACE VIEW dataset_view AS (SELECT * FROM dataset_test);
            """
        )
    return database
@pytest.fixture
def dataset_db_with_swap_table(metadata_db):
    """Create ``dataset_test`` plus a timestamped ``_swap`` sibling table.

    The swap table holds one extra row relative to the live table --
    presumably to exercise table-swap detection/diff logic (TODO confirm).
    Returns a Database factory object for ``my_database``.
    """
    database = factories.DatabaseFactory(memorable_name="my_database")
    with psycopg2.connect(database_dsn(settings.DATABASES_DATA["my_database"])) as conn:
        # DELETE before INSERT keeps row counts stable across repeated runs.
        conn.cursor().execute(
            """
            CREATE TABLE IF NOT EXISTS dataset_test (
                id INT,
                name VARCHAR(255),
                date DATE
            );
            DELETE FROM dataset_test;
            INSERT INTO dataset_test values(1,'test','2022-01-01');
            CREATE TABLE IF NOT EXISTS dataset_test_20220101t000000_swap (
                id INT,
                name VARCHAR(255),
                date DATE
            );
            DELETE FROM dataset_test_20220101t000000_swap;
            INSERT INTO dataset_test_20220101t000000_swap values(1,'test','2022-01-01');
            INSERT INTO dataset_test_20220101t000000_swap values(2,'test_2','2022-01-02');
            """
        )
    return database
@pytest.fixture
def dataset_finder_db(metadata_db):
    """Create the tables backing dataset-finder tests.

    Seeds ``dataworkspace__source_tables`` / ``dataworkspace__catalogue_items``
    with one matching pair (linked by the same dataset UUID), plus the
    ``data`` and ``country_stats`` tables they describe.
    Returns a Database factory object for ``my_database``.
    """
    database = factories.DatabaseFactory(memorable_name="my_database")
    with psycopg2.connect(database_dsn(settings.DATABASES_DATA["my_database"])) as conn:
        # "table" and schema are quoted because they collide with SQL keywords.
        conn.cursor().execute(
            """
            CREATE TABLE IF NOT EXISTS dataworkspace__source_tables (
                id INT,
                name VARCHAR(255),
                dataset_id UUID,
                schema VARCHAR(255),
                "table" VARCHAR(255)
            );
            CREATE TABLE IF NOT EXISTS dataworkspace__catalogue_items (
                id UUID,
                name VARCHAR(255),
                slug VARCHAR(255)
            );
            INSERT INTO dataworkspace__source_tables VALUES(
                1, 'public.data', '0dea6147-d355-4b6d-a140-0304ef9cfeca', 'public', 'data'
            );
            INSERT INTO dataworkspace__catalogue_items VALUES(
                '0dea6147-d355-4b6d-a140-0304ef9cfeca', 'public.data', '1'
            );
            CREATE SCHEMA IF NOT EXISTS public;
            CREATE TABLE IF NOT EXISTS data (
                id int,
                name VARCHAR(255),
                database VARCHAR(255),
                schema VARCHAR(255),
                frequency VARCHAR(255),
                "table" VARCHAR(255)
            );
            CREATE TABLE IF NOT EXISTS country_stats (
                date DATE,
                driving NUMERIC,
                country VARCHAR(255)
            );
            """
        )
    return database
# -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import errno
import filecmp
import glob
import os
import random
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
import time
import traceback
from color import Coloring
from git_command import GitCommand, git_require
from git_config import GitConfig, IsId, GetSchemeFromUrl, GetUrlCookieFile, \
ID_RE
from error import GitError, HookError, UploadError, DownloadError
from error import ManifestInvalidRevisionError
from error import NoManifestException
import platform_utils
from trace import IsTrace, Trace
from git_refs import GitRefs, HEAD, R_HEADS, R_TAGS, R_PUB, R_M
from pyversion import is_python3
if is_python3():
import urllib.parse
else:
import imp
import urlparse
urllib = imp.new_module('urllib')
urllib.parse = urlparse
input = raw_input
def _lwrite(path, content):
  """Atomically write `content` to `path` via a temporary lock file.

  The content is written to `path + '.lock'` and then renamed into place,
  so readers never observe a partially written file.  If the rename fails,
  the lock file is removed and the error re-raised.
  """
  lock = '%s.lock' % path
  # `with` guarantees the handle is closed even if write() raises,
  # unlike the previous open()/try/finally/close() sequence.
  with open(lock, 'w') as fd:
    fd.write(content)
  try:
    platform_utils.rename(lock, path)
  except OSError:
    platform_utils.remove(lock)
    raise
def _error(fmt, *args):
msg = fmt % args
print('error: %s' % msg, file=sys.stderr)
def _warn(fmt, *args):
msg = fmt % args
print('warn: %s' % msg, file=sys.stderr)
def not_rev(r):
  """Return the rev-list exclusion form (^rev) of revision `r`."""
  return '^{}'.format(r)
def sq(r):
  """Single-quote `r` for safe interpolation into a POSIX shell command.

  Embedded single quotes are escaped with the standard '\\'' sequence
  (close quote, escaped quote, reopen quote).  The previous replacement
  string "'\''" collapsed to three bare quote characters in Python source
  (\\' is just ' inside a double-quoted literal), which made the shell
  silently drop quotes from the argument.
  """
  return "'" + r.replace("'", "'\\''") + "'"
# Module-level cache for _ProjectHooks(); populated on the first call.
_project_hook_list = None
def _ProjectHooks():
  """List the hooks present in the 'hooks' directory.
  These hooks are project hooks and are copied to the '.git/hooks' directory
  of all subprojects.
  This function caches the list of hooks (based on the contents of the
  'repo/hooks' directory) on the first call.
  Returns:
    A list of absolute paths to all of the files in the hooks directory.
  """
  global _project_hook_list
  if _project_hook_list is None:
    # Resolve the 'hooks' directory that lives next to this source file.
    d = platform_utils.realpath(os.path.abspath(os.path.dirname(__file__)))
    d = os.path.join(d, 'hooks')
    _project_hook_list = [os.path.join(d, x) for x in platform_utils.listdir(d)]
  return _project_hook_list
class DownloadedChange(object):
  """A Gerrit change that has been downloaded into the local project."""
  # Lazily-computed commit list; see the `commits` property.
  _commit_cache = None
  def __init__(self, project, base, change_id, ps_id, commit):
    self.project = project
    self.base = base
    self.change_id = change_id
    self.ps_id = ps_id
    self.commit = commit
  @property
  def commits(self):
    """One-line summaries of commits reachable from this change but not from `base`."""
    if self._commit_cache is None:
      self._commit_cache = self.project.bare_git.rev_list('--abbrev=8',
                                                          '--abbrev-commit',
                                                          '--pretty=oneline',
                                                          '--reverse',
                                                          '--date-order',
                                                          not_rev(self.base),
                                                          self.commit,
                                                          '--')
    return self._commit_cache
class ReviewableBranch(object):
  """A local topic branch that can be uploaded for code review."""
  # Lazily-computed commit list; see the `commits` property.
  _commit_cache = None
  def __init__(self, project, branch, base):
    self.project = project
    self.branch = branch
    self.base = base
  @property
  def name(self):
    """The short branch name (without the refs/heads/ prefix)."""
    return self.branch.name
  @property
  def commits(self):
    """One-line summaries of commits on this branch but not in `base`."""
    if self._commit_cache is None:
      self._commit_cache = self.project.bare_git.rev_list('--abbrev=8',
                                                          '--abbrev-commit',
                                                          '--pretty=oneline',
                                                          '--reverse',
                                                          '--date-order',
                                                          not_rev(self.base),
                                                          R_HEADS + self.name,
                                                          '--')
    return self._commit_cache
  @property
  def unabbrev_commits(self):
    """Map of abbreviated (8-char) commit id -> full commit id."""
    r = dict()
    for commit in self.project.bare_git.rev_list(not_rev(self.base),
                                                 R_HEADS + self.name,
                                                 '--'):
      r[commit[0:8]] = commit
    return r
  @property
  def date(self):
    """Commit date of the branch tip, as formatted by `git log --pretty=format:%cd`."""
    return self.project.bare_git.log('--pretty=format:%cd',
                                     '-n', '1',
                                     R_HEADS + self.name,
                                     '--')
  def UploadForReview(self, people,
                      auto_topic=False,
                      draft=False,
                      private=False,
                      notify=None,
                      wip=False,
                      dest_branch=None,
                      validate_certs=True,
                      push_options=None):
    """Send this branch for review; thin wrapper over Project.UploadForReview."""
    self.project.UploadForReview(self.name,
                                 people,
                                 auto_topic=auto_topic,
                                 draft=draft,
                                 private=private,
                                 notify=notify,
                                 wip=wip,
                                 dest_branch=dest_branch,
                                 validate_certs=validate_certs,
                                 push_options=push_options)
  def GetPublishedRefs(self):
    """Return a dict of sha -> ref name for refs/changes/* on the review server."""
    refs = {}
    output = self.project.bare_git.ls_remote(
        self.branch.remote.SshReviewUrl(self.project.UserEmail),
        'refs/changes/*')
    for line in output.split('\n'):
      try:
        (sha, ref) = line.split()
        refs[sha] = ref
      except ValueError:
        # Ignore lines that don't split into exactly (sha, ref).
        pass
    return refs
class StatusColoring(Coloring):
  """Colored printers used by `repo status` output."""
  def __init__(self, config):
    Coloring.__init__(self, config, 'status')
    self.project = self.printer('header', attr='bold')
    self.branch = self.printer('header', attr='bold')
    self.nobranch = self.printer('nobranch', fg='red')
    self.important = self.printer('important', fg='red')
    self.added = self.printer('added', fg='green')
    self.changed = self.printer('changed', fg='red')
    self.untracked = self.printer('untracked', fg='red')
class DiffColoring(Coloring):
  """Colored printers used by `repo diff` output."""
  def __init__(self, config):
    Coloring.__init__(self, config, 'diff')
    self.project = self.printer('header', attr='bold')
class _Annotation(object):
def __init__(self, name, value, keep):
self.name = name
self.value = value
self.keep = keep
class _CopyFile(object):
  """Copies a single file into place per a manifest <copyfile> element.
  The copied file is made read-only to discourage direct edits.
  """
  def __init__(self, src, dest, abssrc, absdest):
    self.src = src
    self.dest = dest
    self.abs_src = abssrc
    self.abs_dest = absdest
  def _Copy(self):
    """Copy abs_src to abs_dest if the destination is missing or differs."""
    src = self.abs_src
    dest = self.abs_dest
    # copy file if it does not exist or is out of date
    if not os.path.exists(dest) or not filecmp.cmp(src, dest):
      try:
        # remove existing file first, since it might be read-only
        if os.path.exists(dest):
          platform_utils.remove(dest)
        else:
          # Destination missing entirely: ensure its directory exists.
          dest_dir = os.path.dirname(dest)
          if not platform_utils.isdir(dest_dir):
            os.makedirs(dest_dir)
        shutil.copy(src, dest)
        # make the file read-only
        mode = os.stat(dest)[stat.ST_MODE]
        mode = mode & ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
        os.chmod(dest, mode)
      except IOError:
        _error('Cannot copy file %s to %s', src, dest)
class _LinkFile(object):
  """Creates symlink(s) per a manifest <linkfile> element.
  The src may contain wildcards, in which case every matching file is
  linked into the destination directory.
  """
  def __init__(self, git_worktree, src, dest, relsrc, absdest):
    self.git_worktree = git_worktree
    self.src = src
    self.dest = dest
    self.src_rel_to_dest = relsrc
    self.abs_dest = absdest
  def __linkIt(self, relSrc, absDest):
    """Create (or refresh) a single symlink absDest -> relSrc."""
    # link file if it does not exist or is out of date
    if not platform_utils.islink(absDest) or (platform_utils.readlink(absDest) != relSrc):
      try:
        # remove existing file first, since it might be read-only
        if os.path.lexists(absDest):
          platform_utils.remove(absDest)
        else:
          # Destination missing entirely: ensure its directory exists.
          dest_dir = os.path.dirname(absDest)
          if not platform_utils.isdir(dest_dir):
            os.makedirs(dest_dir)
        platform_utils.symlink(relSrc, absDest)
      except IOError:
        _error('Cannot link file %s to %s', relSrc, absDest)
  def _Link(self):
    """Link the self.rel_src_to_dest and self.abs_dest. Handles wild cards
    on the src linking all of the files in the source in to the destination
    directory.
    """
    # We use the absSrc to handle the situation where the current directory
    # is not the root of the repo
    absSrc = os.path.join(self.git_worktree, self.src)
    if os.path.exists(absSrc):
      # Entity exists so just a simple one to one link operation
      self.__linkIt(self.src_rel_to_dest, self.abs_dest)
    else:
      # Entity doesn't exist assume there is a wild card
      absDestDir = self.abs_dest
      if os.path.exists(absDestDir) and not platform_utils.isdir(absDestDir):
        _error('Link error: src with wildcard, %s must be a directory',
               absDestDir)
      else:
        absSrcFiles = glob.glob(absSrc)
        for absSrcFile in absSrcFiles:
          # Create a relative path from source dir to destination dir
          absSrcDir = os.path.dirname(absSrcFile)
          relSrcDir = os.path.relpath(absSrcDir, absDestDir)
          # Get the source file name
          srcFile = os.path.basename(absSrcFile)
          # Now form the final full paths to srcFile. They will be
          # absolute for the destination and relative for the source.
          absDest = os.path.join(absDestDir, srcFile)
          relSrc = os.path.join(relSrcDir, srcFile)
          self.__linkIt(relSrc, absDest)
class RemoteSpec(object):
  """Plain value object describing a project's remote: name, URLs,
  review server, and default revision."""
  def __init__(self,
               name,
               url=None,
               pushUrl=None,
               review=None,
               revision=None,
               orig_name=None,
               fetchUrl=None):
    (self.name, self.url, self.pushUrl, self.review,
     self.revision, self.orig_name, self.fetchUrl) = (
         name, url, pushUrl, review, revision, orig_name, fetchUrl)
class RepoHook(object):
  """A RepoHook contains information about a script to run as a hook.
  Hooks are used to run a python script before running an upload (for instance,
  to run presubmit checks). Eventually, we may have hooks for other actions.
  This shouldn't be confused with files in the 'repo/hooks' directory. Those
  files are copied into each '.git/hooks' folder for each project. Repo-level
  hooks are associated instead with repo actions.
  Hooks are always python. When a hook is run, we will load the hook into the
  interpreter and execute its main() function.
  """
  def __init__(self,
               hook_type,
               hooks_project,
               topdir,
               manifest_url,
               abort_if_user_denies=False):
    """RepoHook constructor.
    Params:
      hook_type: A string representing the type of hook. This is also used
          to figure out the name of the file containing the hook. For
          example: 'pre-upload'.
      hooks_project: The project containing the repo hooks. If you have a
          manifest, this is manifest.repo_hooks_project. OK if this is None,
          which will make the hook a no-op.
      topdir: Repo's top directory (the one containing the .repo directory).
          Scripts will run with CWD as this directory. If you have a manifest,
          this is manifest.topdir
      manifest_url: The URL to the manifest git repo.
      abort_if_user_denies: If True, we'll throw a HookError() if the user
          doesn't allow us to run the hook.
    """
    self._hook_type = hook_type
    self._hooks_project = hooks_project
    self._manifest_url = manifest_url
    self._topdir = topdir
    self._abort_if_user_denies = abort_if_user_denies
    # Store the full path to the script for convenience.
    if self._hooks_project:
      self._script_fullpath = os.path.join(self._hooks_project.worktree,
                                           self._hook_type + '.py')
    else:
      self._script_fullpath = None
  def _GetHash(self):
    """Return a hash of the contents of the hooks directory.
    We'll just use git to do this. This hash has the property that if anything
    changes in the directory we will return a different hash.
    SECURITY CONSIDERATION:
      This hash only represents the contents of files in the hook directory, not
      any other files imported or called by hooks. Changes to imported files
      can change the script behavior without affecting the hash.
    Returns:
      A string representing the hash. This will always be ASCII so that it can
      be printed to the user easily.
    """
    assert self._hooks_project, "Must have hooks to calculate their hash."
    # We will use the work_git object rather than just calling GetRevisionId().
    # That gives us a hash of the latest checked in version of the files that
    # the user will actually be executing. Specifically, GetRevisionId()
    # doesn't appear to change even if a user checks out a different version
    # of the hooks repo (via git checkout) nor if a user commits their own revs.
    #
    # NOTE: Local (non-committed) changes will not be factored into this hash.
    # I think this is OK, since we're really only worried about warning the user
    # about upstream changes.
    return self._hooks_project.work_git.rev_parse('HEAD')
  def _GetMustVerb(self):
    """Return 'must' if the hook is required; 'should' if not."""
    if self._abort_if_user_denies:
      return 'must'
    else:
      return 'should'
  def _CheckForHookApproval(self):
    """Check to see whether this hook has been approved.
    We'll accept approval of manifest URLs if they're using secure transports.
    This way the user can say they trust the manifest hoster. For insecure
    hosts, we fall back to checking the hash of the hooks repo.
    Note that we ask permission for each individual hook even though we use
    the hash of all hooks when detecting changes. We'd like the user to be
    able to approve / deny each hook individually. We only use the hash of all
    hooks because there is no other easy way to detect changes to local imports.
    Returns:
      True if this hook is approved to run; False otherwise.
    Raises:
      HookError: Raised if the user doesn't approve and abort_if_user_denies
          was passed to the constructor.
    """
    if self._ManifestUrlHasSecureScheme():
      return self._CheckForHookApprovalManifest()
    else:
      return self._CheckForHookApprovalHash()
  def _CheckForHookApprovalHelper(self, subkey, new_val, main_prompt,
                                  changed_prompt):
    """Check for approval for a particular attribute and hook.
    Args:
      subkey: The git config key under [repo.hooks.<hook_type>] to store the
          last approved string.
      new_val: The new value to compare against the last approved one.
      main_prompt: Message to display to the user to ask for approval.
      changed_prompt: Message explaining why we're re-asking for approval.
    Returns:
      True if this hook is approved to run; False otherwise.
    Raises:
      HookError: Raised if the user doesn't approve and abort_if_user_denies
          was passed to the constructor.
    """
    hooks_config = self._hooks_project.config
    git_approval_key = 'repo.hooks.%s.%s' % (self._hook_type, subkey)
    # Get the last value that the user approved for this hook; may be None.
    old_val = hooks_config.GetString(git_approval_key)
    if old_val is not None:
      # User previously approved hook and asked not to be prompted again.
      if new_val == old_val:
        # Approval matched. We're done.
        return True
      else:
        # Give the user a reason why we're prompting, since they last told
        # us to "never ask again".
        prompt = 'WARNING: %s\n\n' % (changed_prompt,)
    else:
      prompt = ''
    # Prompt the user only if stdout is a tty; otherwise we fall through
    # and assume "no" below.
    if sys.stdout.isatty():
      prompt += main_prompt + ' (yes/always/NO)? '
      response = input(prompt).lower()
      print()
      # User is doing a one-time approval.
      if response in ('y', 'yes'):
        return True
      elif response == 'always':
        hooks_config.SetString(git_approval_key, new_val)
        return True
    # For anything else, we'll assume no approval.
    if self._abort_if_user_denies:
      raise HookError('You must allow the %s hook or use --no-verify.' %
                      self._hook_type)
    return False
  def _ManifestUrlHasSecureScheme(self):
    """Check if the URI for the manifest is a secure transport."""
    secure_schemes = ('file', 'https', 'ssh', 'persistent-https', 'sso', 'rpc')
    parse_results = urllib.parse.urlparse(self._manifest_url)
    return parse_results.scheme in secure_schemes
  def _CheckForHookApprovalManifest(self):
    """Check whether the user has approved this manifest host.
    Returns:
      True if this hook is approved to run; False otherwise.
    """
    return self._CheckForHookApprovalHelper(
        'approvedmanifest',
        self._manifest_url,
        'Run hook scripts from %s' % (self._manifest_url,),
        'Manifest URL has changed since %s was allowed.' % (self._hook_type,))
  def _CheckForHookApprovalHash(self):
    """Check whether the user has approved the hooks repo.
    Returns:
      True if this hook is approved to run; False otherwise.
    """
    prompt = ('Repo %s run the script:\n'
              '  %s\n'
              '\n'
              'Do you want to allow this script to run')
    return self._CheckForHookApprovalHelper(
        'approvedhash',
        self._GetHash(),
        prompt % (self._GetMustVerb(), self._script_fullpath),
        'Scripts have changed since %s was allowed.' % (self._hook_type,))
  def _ExecuteHook(self, **kwargs):
    """Actually execute the given hook.
    This will run the hook's 'main' function in our python interpreter.
    Args:
      kwargs: Keyword arguments to pass to the hook. These are often specific
          to the hook type. For instance, pre-upload hooks will contain
          a project_list.
    """
    # Keep sys.path and CWD stashed away so that we can always restore them
    # upon function exit.
    orig_path = os.getcwd()
    orig_syspath = sys.path
    try:
      # Always run hooks with CWD as topdir.
      os.chdir(self._topdir)
      # Put the hook dir as the first item of sys.path so hooks can do
      # relative imports. We want to replace the repo dir as [0] so
      # hooks can't import repo files.
      sys.path = [os.path.dirname(self._script_fullpath)] + sys.path[1:]
      # Exec, storing global context in the context dict. We catch exceptions
      # and convert to a HookError w/ just the failing traceback.
      context = {'__file__': self._script_fullpath}
      try:
        exec(compile(open(self._script_fullpath).read(),
                     self._script_fullpath, 'exec'), context)
      except Exception:
        raise HookError('%s\nFailed to import %s hook; see traceback above.' %
                        (traceback.format_exc(), self._hook_type))
      # Running the script should have defined a main() function.
      if 'main' not in context:
        raise HookError('Missing main() in: "%s"' % self._script_fullpath)
      # Add 'hook_should_take_kwargs' to the arguments to be passed to main.
      # We don't actually want hooks to define their main with this argument--
      # it's there to remind them that their hook should always take **kwargs.
      # For instance, a pre-upload hook should be defined like:
      #   def main(project_list, **kwargs):
      #
      # This allows us to later expand the API without breaking old hooks.
      kwargs = kwargs.copy()
      kwargs['hook_should_take_kwargs'] = True
      # Call the main function in the hook. If the hook should cause the
      # build to fail, it will raise an Exception. We'll catch that convert
      # to a HookError w/ just the failing traceback.
      try:
        context['main'](**kwargs)
      except Exception:
        raise HookError('%s\nFailed to run main() for %s hook; see traceback '
                        'above.' % (traceback.format_exc(),
                                    self._hook_type))
    finally:
      # Restore sys.path and CWD.
      sys.path = orig_syspath
      os.chdir(orig_path)
  def Run(self, user_allows_all_hooks, **kwargs):
    """Run the hook.
    If the hook doesn't exist (because there is no hooks project or because
    this particular hook is not enabled), this is a no-op.
    Args:
      user_allows_all_hooks: If True, we will never prompt about running the
          hook--we'll just assume it's OK to run it.
      kwargs: Keyword arguments to pass to the hook. These are often specific
          to the hook type. For instance, pre-upload hooks will contain
          a project_list.
    Raises:
      HookError: If there was a problem finding the hook or the user declined
          to run a required hook (from _CheckForHookApproval).
    """
    # No-op if there is no hooks project or if hook is disabled.
    if ((not self._hooks_project) or (self._hook_type not in
                                      self._hooks_project.enabled_repo_hooks)):
      return
    # Bail with a nice error if we can't find the hook.
    if not os.path.isfile(self._script_fullpath):
      raise HookError('Couldn\'t find repo hook: "%s"' % self._script_fullpath)
    # Make sure the user is OK with running the hook.
    if (not user_allows_all_hooks) and (not self._CheckForHookApproval()):
      return
    # Run the hook with the same version of python we're using.
    self._ExecuteHook(**kwargs)
class Project(object):
  """A single git checkout described by the manifest."""
  # These objects can be shared between several working trees.
  shareable_files = ['description', 'info']
  shareable_dirs = ['hooks', 'objects', 'rr-cache', 'svn']
  # These objects can only be used by a single working tree.
  working_tree_files = ['config', 'packed-refs', 'shallow']
  working_tree_dirs = ['logs', 'refs']
  def __init__(self,
               manifest,
               name,
               remote,
               gitdir,
               objdir,
               worktree,
               relpath,
               revisionExpr,
               revisionId,
               rebase=True,
               groups=None,
               sync_c=False,
               sync_s=False,
               sync_tags=True,
               clone_depth=None,
               upstream=None,
               parent=None,
               is_derived=False,
               dest_branch=None,
               optimized_fetch=False,
               old_revision=None):
    """Init a Project object.
    Args:
      manifest: The XmlManifest object.
      name: The `name` attribute of manifest.xml's project element.
      remote: RemoteSpec object specifying its remote's properties.
      gitdir: Absolute path of git directory.
      objdir: Absolute path of directory to store git objects.
      worktree: Absolute path of git working tree.
      relpath: Relative path of git working tree to repo's top directory.
      revisionExpr: The `revision` attribute of manifest.xml's project element.
      revisionId: git commit id for checking out.
      rebase: The `rebase` attribute of manifest.xml's project element.
      groups: The `groups` attribute of manifest.xml's project element.
      sync_c: The `sync-c` attribute of manifest.xml's project element.
      sync_s: The `sync-s` attribute of manifest.xml's project element.
      sync_tags: The `sync-tags` attribute of manifest.xml's project element.
      upstream: The `upstream` attribute of manifest.xml's project element.
      parent: The parent Project object.
      is_derived: False if the project was explicitly defined in the manifest;
                  True if the project is a discovered submodule.
      dest_branch: The branch to which to push changes for review by default.
      optimized_fetch: If True, when a project is set to a sha1 revision, only
                       fetch from the remote if the sha1 is not present locally.
      old_revision: saved git commit id for open GITC projects.
    """
    self.manifest = manifest
    self.name = name
    self.remote = remote
    # Normalize Windows-style separators so paths compare consistently.
    self.gitdir = gitdir.replace('\\', '/')
    self.objdir = objdir.replace('\\', '/')
    if worktree:
      self.worktree = os.path.normpath(worktree).replace('\\', '/')
    else:
      self.worktree = None
    self.relpath = relpath
    self.revisionExpr = revisionExpr
    # A revisionExpr that is already a full commit id doubles as revisionId.
    if revisionId is None \
            and revisionExpr \
            and IsId(revisionExpr):
      self.revisionId = revisionExpr
    else:
      self.revisionId = revisionId
    self.rebase = rebase
    self.groups = groups
    self.sync_c = sync_c
    self.sync_s = sync_s
    self.sync_tags = sync_tags
    self.clone_depth = clone_depth
    self.upstream = upstream
    self.parent = parent
    self.is_derived = is_derived
    self.optimized_fetch = optimized_fetch
    self.subprojects = []
    self.snapshots = {}
    self.copyfiles = []
    self.linkfiles = []
    self.annotations = []
    self.config = GitConfig.ForRepository(gitdir=self.gitdir,
                                          defaults=self.manifest.globalConfig)
    # work_git operates on the working tree; bare_git on the bare repository.
    if self.worktree:
      self.work_git = self._GitGetByExec(self, bare=False, gitdir=gitdir)
    else:
      self.work_git = None
    self.bare_git = self._GitGetByExec(self, bare=True, gitdir=gitdir)
    self.bare_ref = GitRefs(gitdir)
    self.bare_objdir = self._GitGetByExec(self, bare=True, gitdir=objdir)
    self.dest_branch = dest_branch
    self.old_revision = old_revision
    # This will be filled in if a project is later identified to be the
    # project containing repo hooks.
    self.enabled_repo_hooks = []
  @property
  def Derived(self):
    """True if this project is a discovered submodule rather than
    explicitly defined in the manifest."""
    return self.is_derived
  @property
  def Exists(self):
    """True if both the git dir and the object dir exist on disk."""
    return platform_utils.isdir(self.gitdir) and platform_utils.isdir(self.objdir)
@property
def CurrentBranch(self):
"""Obtain the name of the currently checked out branch.
The branch name omits the 'refs/heads/' prefix.
None is returned if the project is on a detached HEAD.
"""
b = self.work_git.GetHead()
if b.startswith(R_HEADS):
return b[len(R_HEADS):]
return None
def IsRebaseInProgress(self):
w = self.worktree
g = os.path.join(w, '.git')
return os.path.exists(os.path.join(g, 'rebase-apply')) \
or os.path.exists(os.path.join(g, 'rebase-merge')) \
or os.path.exists(os.path.join(w, '.dotest'))
  def IsDirty(self, consider_untracked=True):
    """Is the working directory modified in some way?
    Args:
      consider_untracked: If True, untracked files also count as dirty.
    Returns:
      True if there are staged or unstaged changes (or untracked files,
      when requested); False otherwise.
    """
    # Refresh the index first so stat-only differences don't register.
    self.work_git.update_index('-q',
                               '--unmerged',
                               '--ignore-missing',
                               '--refresh')
    if self.work_git.DiffZ('diff-index', '-M', '--cached', HEAD):
      return True
    if self.work_git.DiffZ('diff-files'):
      return True
    if consider_untracked and self.work_git.LsOthers():
      return True
    return False
_userident_name = None
_userident_email = None
  @property
  def UserName(self):
    """Obtain the user's personal name.
    Parsed lazily from git's GIT_COMMITTER_IDENT (see _LoadUserIdentity).
    """
    if self._userident_name is None:
      self._LoadUserIdentity()
    return self._userident_name
  @property
  def UserEmail(self):
    """Obtain the user's email address. This is very likely
    to be their Gerrit login.
    Parsed lazily from git's GIT_COMMITTER_IDENT (see _LoadUserIdentity).
    """
    if self._userident_email is None:
      self._LoadUserIdentity()
    return self._userident_email
def _LoadUserIdentity(self):
u = self.bare_git.var('GIT_COMMITTER_IDENT')
m = re.compile("^(.*) <([^>]*)> ").match(u)
if m:
self._userident_name = m.group(1)
self._userident_email = m.group(2)
else:
self._userident_name = ''
self._userident_email = ''
  def GetRemote(self, name):
    """Get the configuration for a single remote.
    Args:
      name: Name of the remote.
    """
    return self.config.GetRemote(name)
  def GetBranch(self, name):
    """Get the configuration for a single branch.
    Args:
      name: Short branch name (without the refs/heads/ prefix).
    """
    return self.config.GetBranch(name)
  def GetBranches(self):
    """Get all existing local branches.
    Returns:
      Dict of short branch name -> branch config object, with `current`,
      `published` and `revision` attributes filled in.
    """
    current = self.CurrentBranch
    all_refs = self._allrefs
    heads = {}
    # First pass: collect every local head.
    for name, ref_id in all_refs.items():
      if name.startswith(R_HEADS):
        name = name[len(R_HEADS):]
        b = self.GetBranch(name)
        b.current = name == current
        b.published = None
        b.revision = ref_id
        heads[name] = b
    # Second pass: attach the published ref id (R_PUB namespace), if any.
    for name, ref_id in all_refs.items():
      if name.startswith(R_PUB):
        name = name[len(R_PUB):]
        b = heads.get(name)
        if b:
          b.published = ref_id
    return heads
def MatchesGroups(self, manifest_groups):
"""Returns true if the manifest groups specified at init should cause
this project to be synced.
Prefixing a manifest group with "-" inverts the meaning of a group.
All projects are implicitly labelled with "all".
labels are resolved in order. In the example case of
project_groups: "all,group1,group2"
manifest_groups: "-group1,group2"
the project will be matched.
The special manifest group "default" will match any project that
does not have the special project group "notdefault"
"""
expanded_manifest_groups = manifest_groups or ['default']
expanded_project_groups = ['all'] + (self.groups or [])
if 'notdefault' not in expanded_project_groups:
expanded_project_groups += ['default']
matched = False
for group in expanded_manifest_groups:
if group.startswith('-') and group[1:] in expanded_project_groups:
matched = False
elif group in expanded_project_groups:
matched = True
return matched
# Status Display ##
  def UncommitedFiles(self, get_all=True):
    """Returns a list of strings, uncommitted files in the git tree.
    Args:
      get_all: a boolean, if True - get information about all different
               uncommitted files. If False - return as soon as any kind of
               uncommitted files is detected.
    """
    details = []
    # Refresh the index first so stat-only differences don't register.
    self.work_git.update_index('-q',
                               '--unmerged',
                               '--ignore-missing',
                               '--refresh')
    if self.IsRebaseInProgress():
      details.append("rebase in progress")
      if not get_all:
        return details
    # Staged (index) changes.
    changes = self.work_git.DiffZ('diff-index', '--cached', HEAD).keys()
    if changes:
      details.extend(changes)
      if not get_all:
        return details
    # Unstaged (working file) changes.
    changes = self.work_git.DiffZ('diff-files').keys()
    if changes:
      details.extend(changes)
      if not get_all:
        return details
    # Untracked files.
    changes = self.work_git.LsOthers()
    if changes:
      details.extend(changes)
    return details
def HasChanges(self):
"""Returns true if there are uncommitted changes.
"""
if self.UncommitedFiles(get_all=False):
return True
else:
return False
  def PrintWorkTreeStatus(self, output_redir=None, quiet=False):
    """Prints the status of the repository to stdout.
    Args:
      output: If specified, redirect the output to this object.
      quiet: If True then only print the project name. Do not print
             the modified files, branch name, etc.
    Returns:
      'CLEAN' when there is nothing to report, 'DIRTY' when something was
      printed; returns None (implicitly) when the worktree is missing.
    """
    if not platform_utils.isdir(self.worktree):
      if output_redir is None:
        output_redir = sys.stdout
      print(file=output_redir)
      print('project %s/' % self.relpath, file=output_redir)
      print('  missing (run "repo sync")', file=output_redir)
      return
    # Refresh the index so stat-only differences don't show as dirty.
    self.work_git.update_index('-q',
                               '--unmerged',
                               '--ignore-missing',
                               '--refresh')
    rb = self.IsRebaseInProgress()
    di = self.work_git.DiffZ('diff-index', '-M', '--cached', HEAD)
    df = self.work_git.DiffZ('diff-files')
    do = self.work_git.LsOthers()
    if not rb and not di and not df and not do and not self.CurrentBranch:
      return 'CLEAN'
    out = StatusColoring(self.config)
    if output_redir is not None:
      out.redirect(output_redir)
    out.project('project %-40s', self.relpath + '/ ')
    if quiet:
      out.nl()
      return 'DIRTY'
    branch = self.CurrentBranch
    if branch is None:
      out.nobranch('(*** NO BRANCH ***)')
    else:
      out.branch('branch %s', branch)
    out.nl()
    if rb:
      out.important('prior sync failed; rebase still in progress')
      out.nl()
    paths = list()
    paths.extend(di.keys())
    paths.extend(df.keys())
    paths.extend(do)
    # For each path: i = staged (index) change, f = unstaged (file) change.
    for p in sorted(set(paths)):
      try:
        i = di[p]
      except KeyError:
        i = None
      try:
        f = df[p]
      except KeyError:
        f = None
      if i:
        i_status = i.status.upper()
      else:
        i_status = '-'
      if f:
        f_status = f.status.lower()
      else:
        f_status = '-'
      if i and i.src_path:
        line = ' %s%s\t%s => %s (%s%%)' % (i_status, f_status,
                                           i.src_path, p, i.level)
      else:
        line = ' %s%s\t%s' % (i_status, f_status, p)
      if i and not f:
        out.added('%s', line)
      elif (i and f) or (not i and f):
        out.changed('%s', line)
      elif not i and not f:
        out.untracked('%s', line)
      else:
        out.write('%s', line)
      out.nl()
    return 'DIRTY'
def PrintWorkTreeDiff(self, absolute_paths=False):
  """Prints the diff of the work tree against HEAD to stdout.

  Args:
    absolute_paths: If True, prefix the diff's a/ and b/ paths with
        this project's relpath.
  """
  out = DiffColoring(self.config)
  cmd = ['diff']
  if out.is_on:
    cmd.append('--color')
  cmd.append(HEAD)
  if absolute_paths:
    cmd.append('--src-prefix=a/%s/' % self.relpath)
    cmd.append('--dst-prefix=b/%s/' % self.relpath)
  cmd.append('--')
  p = GitCommand(self,
                 cmd,
                 capture_stdout=True,
                 capture_stderr=True)
  has_diff = False
  for line in p.process.stdout:
    if not has_diff:
      # Print the project header only once, and only if there is output.
      out.nl()
      out.project('project %s/' % self.relpath)
      out.nl()
      has_diff = True
    print(line[:-1])
  p.Wait()
# Publish / Upload ##
def WasPublished(self, branch, all_refs=None):
  """Was the branch published (uploaded) for code review?

  Returns:
    The SHA-1 of the last published state for |branch|, or None if the
    branch was never published.
  """
  key = R_PUB + branch
  if all_refs is not None:
    # Caller supplied a ref snapshot; a plain lookup suffices.
    return all_refs.get(key)
  try:
    return self.bare_git.rev_parse(key)
  except GitError:
    return None
def CleanPublishedCache(self, all_refs=None):
  """Prunes any stale published refs.

  A published ref is stale when the local head it was uploaded from
  no longer exists.
  """
  if all_refs is None:
    all_refs = self._allrefs
  heads = set()
  stale = {}
  for ref_name, ref_id in all_refs.items():
    if ref_name.startswith(R_HEADS):
      heads.add(ref_name)
    elif ref_name.startswith(R_PUB):
      stale[ref_name] = ref_id
  for ref_name, ref_id in stale.items():
    branch = ref_name[len(R_PUB):]
    if R_HEADS + branch not in heads:
      self.bare_git.DeleteRef(ref_name, ref_id)
def GetUploadableBranches(self, selected_branch=None):
  """List any branches which can be uploaded for review."""
  heads = {}
  published = {}
  for ref_name, ref_id in self._allrefs.items():
    if ref_name.startswith(R_HEADS):
      heads[ref_name[len(R_HEADS):]] = ref_id
    elif ref_name.startswith(R_PUB):
      published[ref_name[len(R_PUB):]] = ref_id

  ready = []
  for branch_name, ref_id in heads.items():
    # Skip branches whose published state already matches the head.
    if branch_name in published and published[branch_name] == ref_id:
      continue
    if selected_branch and branch_name != selected_branch:
      continue
    uploadable = self.GetUploadableBranch(branch_name)
    if uploadable:
      ready.append(uploadable)
  return ready
def GetUploadableBranch(self, branch_name):
  """Get a single uploadable branch, or None."""
  branch = self.GetBranch(branch_name)
  base = branch.LocalMerge
  if not base:
    # No tracking configuration -> nothing to upload against.
    return None
  rb = ReviewableBranch(self, branch, base)
  return rb if rb.commits else None
def UploadForReview(self, branch=None,
                    people=([], []),
                    auto_topic=False,
                    draft=False,
                    private=False,
                    notify=None,
                    wip=False,
                    dest_branch=None,
                    validate_certs=True,
                    push_options=None):
  """Uploads the named branch for code review.

  Args:
    branch: branch name to upload; defaults to the current branch.
    people: ([reviewers], [cc]) email lists.
    auto_topic: send the local branch name as the Gerrit topic.
    draft: push to refs/drafts/* instead of refs/for/*.
    private: mark the change private.
    notify: Gerrit notify handling option.
    wip: mark the change work-in-progress.
    dest_branch: target branch on the server; defaults to the
        project's dest-branch or the branch's merge spec.
    validate_certs: verify HTTPS certificates when resolving the
        review URL.
    push_options: extra `git push -o` options.

  Raises:
    GitError: no branch, no tracking config, or no review URL.
    UploadError: review not configured or the push failed.
  """
  if branch is None:
    branch = self.CurrentBranch
  if branch is None:
    raise GitError('not currently on a branch')

  branch = self.GetBranch(branch)
  if not branch.LocalMerge:
    raise GitError('branch %s does not track a remote' % branch.name)
  if not branch.remote.review:
    raise GitError('remote %s has no review url' % branch.remote.name)

  if dest_branch is None:
    dest_branch = self.dest_branch
  if dest_branch is None:
    dest_branch = branch.merge
  if not dest_branch.startswith(R_HEADS):
    dest_branch = R_HEADS + dest_branch

  if not branch.remote.projectname:
    # Remember the server-side project name for later uploads.
    branch.remote.projectname = self.name
    branch.remote.Save()

  url = branch.remote.ReviewUrl(self.UserEmail, validate_certs)
  if url is None:
    raise UploadError('review not configured')
  cmd = ['push']

  if url.startswith('ssh://'):
    cmd.append('--receive-pack=gerrit receive-pack')

  for push_option in (push_options or []):
    cmd.append('-o')
    cmd.append(push_option)

  cmd.append(url)

  if dest_branch.startswith(R_HEADS):
    dest_branch = dest_branch[len(R_HEADS):]

  upload_type = 'for'
  if draft:
    upload_type = 'drafts'

  ref_spec = '%s:refs/%s/%s' % (R_HEADS + branch.name, upload_type,
                                dest_branch)
  # Gerrit push options are encoded after a '%' in the refspec.
  opts = []
  if auto_topic:
    opts += ['topic=' + branch.name]

  opts += ['r=%s' % p for p in people[0]]
  opts += ['cc=%s' % p for p in people[1]]
  if notify:
    opts += ['notify=' + notify]
  if private:
    opts += ['private']
  if wip:
    opts += ['wip']
  if opts:
    ref_spec = ref_spec + '%' + ','.join(opts)
  cmd.append(ref_spec)

  if GitCommand(self, cmd, bare=True).Wait() != 0:
    raise UploadError('Upload failed')

  # Record what we uploaded under refs/published/ for later comparison.
  msg = "posted to %s for %s" % (branch.remote.review, dest_branch)
  self.bare_git.UpdateRef(R_PUB + branch.name,
                          R_HEADS + branch.name,
                          message=msg)
# Sync ##
def _ExtractArchive(self, tarpath, path=None):
"""Extract the given tar on its current location
Args:
- tarpath: The path to the actual tar file
"""
try:
with tarfile.open(tarpath, 'r') as tar:
tar.extractall(path=path)
return True
except (IOError, tarfile.TarError) as e:
_error("Cannot extract archive %s: %s", tarpath, str(e))
return False
def Sync_NetworkHalf(self,
                     quiet=False,
                     is_new=None,
                     current_branch_only=False,
                     force_sync=False,
                     clone_bundle=True,
                     no_tags=False,
                     archive=False,
                     optimized_fetch=False,
                     prune=False,
                     submodules=False):
  """Perform only the network IO portion of the sync process.

  Local working directory/branch state is not affected.

  Returns:
    True on success, False if any network step failed.
  """
  if archive and not isinstance(self, MetaProject):
    # Archive mode: download a tarball via `git archive` instead of
    # fetching objects (not supported over http/https).
    if self.remote.url.startswith(('http://', 'https://')):
      _error("%s: Cannot fetch archives from http/https remotes.", self.name)
      return False

    name = self.relpath.replace('\\', '/')
    name = name.replace('/', '_')
    tarpath = '%s.tar' % name
    topdir = self.manifest.topdir

    try:
      self._FetchArchive(tarpath, cwd=topdir)
    except GitError as e:
      _error('%s', e)
      return False

    # From now on, we only need absolute tarpath
    tarpath = os.path.join(topdir, tarpath)

    if not self._ExtractArchive(tarpath, path=topdir):
      return False
    try:
      platform_utils.remove(tarpath)
    except OSError as e:
      # Best effort cleanup; extraction already succeeded.
      _warn("Cannot remove archive %s: %s", tarpath, str(e))
    self._CopyAndLinkFiles()
    return True
  if is_new is None:
    is_new = not self.Exists
  if is_new:
    self._InitGitDir(force_sync=force_sync)
  else:
    self._UpdateHooks()
  self._InitRemote()

  if is_new:
    # A freshly-created repo may have been pointed at a reference
    # object store via objects/info/alternates.
    alt = os.path.join(self.gitdir, 'objects/info/alternates')
    try:
      fd = open(alt)
      try:
        # This works for both absolute and relative alternate directories.
        alt_dir = os.path.join(self.objdir, 'objects', fd.readline().rstrip())
      finally:
        fd.close()
    except IOError:
      alt_dir = None
  else:
    alt_dir = None

  if clone_bundle \
          and alt_dir is None \
          and self._ApplyCloneBundle(initial=is_new, quiet=quiet):
    is_new = False

  if not current_branch_only:
    if self.sync_c:
      current_branch_only = True
    elif not self.manifest._loaded:
      # Manifest cannot check defaults until it syncs.
      current_branch_only = False
    elif self.manifest.default.sync_c:
      current_branch_only = True

  if not no_tags:
    if not self.sync_tags:
      no_tags = True

  if self.clone_depth:
    depth = self.clone_depth
  else:
    depth = self.manifest.manifestProject.config.GetString('repo.depth')

  # Skip the fetch when the pinned revision is already present locally.
  need_to_fetch = not (optimized_fetch and
                       (ID_RE.match(self.revisionExpr) and
                        self._CheckForImmutableRevision()))
  if (need_to_fetch and
      not self._RemoteFetch(initial=is_new, quiet=quiet, alt_dir=alt_dir,
                            current_branch_only=current_branch_only,
                            no_tags=no_tags, prune=prune, depth=depth,
                            submodules=submodules, force_sync=force_sync)):
    return False

  mp = self.manifest.manifestProject
  dissociate = mp.config.GetBoolean('repo.dissociate')
  if dissociate:
    # Copy borrowed objects locally, then drop the alternates link.
    alternates_file = os.path.join(self.gitdir, 'objects/info/alternates')
    if os.path.exists(alternates_file):
      cmd = ['repack', '-a', '-d']
      if GitCommand(self, cmd, bare=True).Wait() != 0:
        return False
      platform_utils.remove(alternates_file)

  if self.worktree:
    self._InitMRef()
  else:
    self._InitMirrorHead()
    try:
      platform_utils.remove(os.path.join(self.gitdir, 'FETCH_HEAD'))
    except OSError:
      pass
  return True
def PostRepoUpgrade(self):
  # Refresh this project's git hooks (via _InitHooks) after the repo
  # tool itself has been upgraded.
  self._InitHooks()
def _CopyAndLinkFiles(self):
  """Apply all recorded copyfile and linkfile entries."""
  if self.manifest.isGitcClient:
    return
  for cf in self.copyfiles:
    cf._Copy()
  for lf in self.linkfiles:
    lf._Link()
def GetCommitRevisionId(self):
  """Get revisionId of a commit.

  Use this method instead of GetRevisionId to get the id of the commit
  rather than the id of the current git object (for example, a tag).

  Raises:
    ManifestInvalidRevisionError: the tag cannot be resolved locally.
  """
  if self.revisionExpr.startswith(R_TAGS):
    # Peel the tag down to the commit it points at.
    try:
      return self.bare_git.rev_list(self.revisionExpr, '-1')[0]
    except GitError:
      raise ManifestInvalidRevisionError('revision %s in %s not found' %
                                         (self.revisionExpr, self.name))
  return self.GetRevisionId(self._allrefs)
def GetRevisionId(self, all_refs=None):
  """Resolve this project's revision expression to a commit id.

  Raises:
    ManifestInvalidRevisionError: the revision cannot be resolved.
  """
  if self.revisionId:
    return self.revisionId

  rev = self.GetRemote(self.remote.name).ToLocal(self.revisionExpr)
  if all_refs is not None and rev in all_refs:
    return all_refs[rev]

  try:
    return self.bare_git.rev_parse('--verify', '%s^0' % rev)
  except GitError:
    raise ManifestInvalidRevisionError('revision %s in %s not found' %
                                       (self.revisionExpr, self.name))
def Sync_LocalHalf(self, syncbuf, force_sync=False, submodules=False):
  """Perform only the local IO portion of the sync process.

  Network access is not required.  Decides, based on the current
  branch/HEAD state and what was published, whether to fast-forward,
  rebase, hard-reset, or detach; failures and deferred actions are
  reported through |syncbuf|.
  """
  self._InitWorkTree(force_sync=force_sync, submodules=submodules)
  all_refs = self.bare_ref.all
  self.CleanPublishedCache(all_refs)
  revid = self.GetRevisionId(all_refs)

  def _doff():
    # Deferred fast-forward to the manifest revision.
    self._FastForward(revid)
    self._CopyAndLinkFiles()

  def _dosubmodules():
    self._SyncSubmodules(quiet=True)

  head = self.work_git.GetHead()
  if head.startswith(R_HEADS):
    branch = head[len(R_HEADS):]
    try:
      head = all_refs[head]
    except KeyError:
      head = None
  else:
    branch = None

  if branch is None or syncbuf.detach_head:
    # Currently on a detached HEAD. The user is assumed to
    # not have any local modifications worth worrying about.
    #
    if self.IsRebaseInProgress():
      syncbuf.fail(self, _PriorSyncFailedError())
      return

    if head == revid:
      # No changes; don't do anything further.
      # Except if the head needs to be detached
      #
      if not syncbuf.detach_head:
        # The copy/linkfile config may have changed.
        self._CopyAndLinkFiles()
        return
    else:
      lost = self._revlist(not_rev(revid), HEAD)
      if lost:
        syncbuf.info(self, "discarding %d commits", len(lost))

    try:
      self._Checkout(revid, quiet=True)
      if submodules:
        self._SyncSubmodules(quiet=True)
    except GitError as e:
      syncbuf.fail(self, e)
      return
    self._CopyAndLinkFiles()
    return

  if head == revid:
    # No changes; don't do anything further.
    #
    # The copy/linkfile config may have changed.
    self._CopyAndLinkFiles()
    return

  branch = self.GetBranch(branch)
  if not branch.LocalMerge:
    # The current branch has no tracking configuration.
    # Jump off it to a detached HEAD.
    #
    syncbuf.info(self,
                 "leaving %s; does not track upstream",
                 branch.name)
    try:
      self._Checkout(revid, quiet=True)
      if submodules:
        self._SyncSubmodules(quiet=True)
    except GitError as e:
      syncbuf.fail(self, e)
      return
    self._CopyAndLinkFiles()
    return

  upstream_gain = self._revlist(not_rev(HEAD), revid)
  pub = self.WasPublished(branch.name, all_refs)
  if pub:
    not_merged = self._revlist(not_rev(revid), pub)
    if not_merged:
      if upstream_gain:
        # The user has published this branch and some of those
        # commits are not yet merged upstream. We do not want
        # to rewrite the published commits so we punt.
        #
        syncbuf.fail(self,
                     "branch %s is published (but not merged) and is now "
                     "%d commits behind" % (branch.name, len(upstream_gain)))
        return
    elif pub == head:
      # All published commits are merged, and thus we are a
      # strict subset. We can fast-forward safely.
      #
      syncbuf.later1(self, _doff)
      if submodules:
        syncbuf.later1(self, _dosubmodules)
      return

  # Examine the local commits not in the remote. Find the
  # last one attributed to this user, if any.
  #
  local_changes = self._revlist(not_rev(revid), HEAD, format='%H %ce')
  last_mine = None
  cnt_mine = 0
  for commit in local_changes:
    commit_id, committer_email = commit.decode('utf-8').split(' ', 1)
    if committer_email == self.UserEmail:
      last_mine = commit_id
      cnt_mine += 1

  if not upstream_gain and cnt_mine == len(local_changes):
    # Nothing new upstream and every local commit is ours: leave as-is.
    return

  if self.IsDirty(consider_untracked=False):
    syncbuf.fail(self, _DirtyError())
    return

  # If the upstream switched on us, warn the user.
  #
  if branch.merge != self.revisionExpr:
    if branch.merge and self.revisionExpr:
      syncbuf.info(self,
                   'manifest switched %s...%s',
                   branch.merge,
                   self.revisionExpr)
    elif branch.merge:
      syncbuf.info(self,
                   'manifest no longer tracks %s',
                   branch.merge)

  if cnt_mine < len(local_changes):
    # Upstream rebased. Not everything in HEAD
    # was created by this user.
    #
    syncbuf.info(self,
                 "discarding %d commits removed from upstream",
                 len(local_changes) - cnt_mine)

  branch.remote = self.GetRemote(self.remote.name)
  if not ID_RE.match(self.revisionExpr):
    # in case of manifest sync the revisionExpr might be a SHA1
    branch.merge = self.revisionExpr
    if not branch.merge.startswith('refs/'):
      branch.merge = R_HEADS + branch.merge
  branch.Save()

  if cnt_mine > 0 and self.rebase:
    # Rebase our own commits onto the new upstream revision.
    def _docopyandlink():
      self._CopyAndLinkFiles()

    def _dorebase():
      self._Rebase(upstream='%s^1' % last_mine, onto=revid)
    syncbuf.later2(self, _dorebase)
    if submodules:
      syncbuf.later2(self, _dosubmodules)
    syncbuf.later2(self, _docopyandlink)
  elif local_changes:
    # Local commits exist but are not ours (or rebase disabled): drop them.
    try:
      self._ResetHard(revid)
      if submodules:
        self._SyncSubmodules(quiet=True)
      self._CopyAndLinkFiles()
    except GitError as e:
      syncbuf.fail(self, e)
      return
  else:
    syncbuf.later1(self, _doff)
    if submodules:
      syncbuf.later1(self, _dosubmodules)
def AddCopyFile(self, src, dest, absdest):
  """Record a copyfile entry.

  |dest| should already be an absolute path; |src| is project relative
  and is resolved against the work tree here.
  """
  abssrc = os.path.join(self.worktree, src)
  entry = _CopyFile(src, dest, abssrc, absdest)
  self.copyfiles.append(entry)
def AddLinkFile(self, src, dest, absdest):
  """Record a linkfile entry.

  |dest| should already be an absolute path; |src| is made relative to
  the directory containing |absdest| so the symlink is portable.
  """
  relsrc = os.path.relpath(os.path.join(self.worktree, src),
                           os.path.dirname(absdest))
  self.linkfiles.append(_LinkFile(self.worktree, src, dest, relsrc, absdest))
def AddAnnotation(self, name, value, keep):
  # Attach a name/value annotation to this project.
  self.annotations.append(_Annotation(name, value, keep))
def DownloadPatchSet(self, change_id, patch_id):
  """Download a single patch set of a single change to FETCH_HEAD.

  Returns:
    A DownloadedChange on success, None if the fetch failed.
  """
  remote = self.GetRemote(self.remote.name)
  change_ref = 'refs/changes/%2.2d/%d/%d' % (change_id % 100,
                                             change_id,
                                             patch_id)
  if GitCommand(self, ['fetch', remote.name, change_ref],
                bare=True).Wait() != 0:
    return None
  return DownloadedChange(self,
                          self.GetRevisionId(),
                          change_id,
                          patch_id,
                          self.bare_git.rev_parse('FETCH_HEAD'))
# Branch Management ##
def StartBranch(self, name, branch_merge=''):
  """Create a new branch off the manifest's revision.

  Args:
    name: branch name to create/check out.
    branch_merge: upstream merge spec; defaults to revisionExpr.

  Returns:
    True on success, False if the checkout failed.
  """
  if not branch_merge:
    branch_merge = self.revisionExpr
  head = self.work_git.GetHead()
  if head == (R_HEADS + name):
    # Already on the requested branch.
    return True

  all_refs = self.bare_ref.all
  if R_HEADS + name in all_refs:
    # Branch already exists; just switch to it.
    return GitCommand(self,
                      ['checkout', name, '--'],
                      capture_stdout=True,
                      capture_stderr=True).Wait() == 0

  branch = self.GetBranch(name)
  branch.remote = self.GetRemote(self.remote.name)
  branch.merge = branch_merge
  if not branch.merge.startswith('refs/') and not ID_RE.match(branch_merge):
    branch.merge = R_HEADS + branch_merge
  revid = self.GetRevisionId(all_refs)

  if head.startswith(R_HEADS):
    try:
      head = all_refs[head]
    except KeyError:
      head = None
  if revid and head and revid == head:
    # Fast path: HEAD already sits on the target revision, so write the
    # ref files directly instead of spawning `git checkout -b`.
    ref = os.path.join(self.gitdir, R_HEADS + name)
    try:
      os.makedirs(os.path.dirname(ref))
    except OSError:
      pass
    _lwrite(ref, '%s\n' % revid)
    _lwrite(os.path.join(self.worktree, '.git', HEAD),
            'ref: %s%s\n' % (R_HEADS, name))
    branch.Save()
    return True

  if GitCommand(self,
                ['checkout', '-b', branch.name, revid],
                capture_stdout=True,
                capture_stderr=True).Wait() == 0:
    branch.Save()
    return True
  return False
def CheckoutBranch(self, name):
  """Checkout a local topic branch.

  Args:
    name: The name of the branch to checkout.

  Returns:
    True if the checkout succeeded; False if it didn't; None if the branch
    didn't exist.
  """
  rev = R_HEADS + name
  head = self.work_git.GetHead()
  if head == rev:
    # Already on the branch
    #
    return True

  all_refs = self.bare_ref.all
  try:
    revid = all_refs[rev]
  except KeyError:
    # Branch does not exist in this project
    #
    return None

  if head.startswith(R_HEADS):
    try:
      head = all_refs[head]
    except KeyError:
      head = None

  if head == revid:
    # Same revision; just update HEAD to point to the new
    # target branch, but otherwise take no other action.
    #
    _lwrite(os.path.join(self.worktree, '.git', HEAD),
            'ref: %s%s\n' % (R_HEADS, name))
    return True

  return GitCommand(self,
                    ['checkout', name, '--'],
                    capture_stdout=True,
                    capture_stderr=True).Wait() == 0
def AbandonBranch(self, name):
  """Destroy a local topic branch.

  Args:
    name: The name of the branch to abandon.

  Returns:
    True if the abandon succeeded; False if it didn't; None if the branch
    didn't exist.
  """
  rev = R_HEADS + name
  all_refs = self.bare_ref.all
  if rev not in all_refs:
    # Doesn't exist
    return None

  head = self.work_git.GetHead()
  if head == rev:
    # We can't destroy the branch while we are sitting
    # on it. Switch to a detached HEAD.
    #
    head = all_refs[head]

    revid = self.GetRevisionId(all_refs)
    if head == revid:
      # Already at the manifest revision: detach by writing HEAD directly.
      _lwrite(os.path.join(self.worktree, '.git', HEAD),
              '%s\n' % revid)
    else:
      self._Checkout(revid, quiet=True)

  return GitCommand(self,
                    ['branch', '-D', name],
                    capture_stdout=True,
                    capture_stderr=True).Wait() == 0
def PruneHeads(self):
  """Prune any topic branches already merged into upstream.

  Returns:
    List of ReviewableBranch for branches that could NOT be deleted
    (still have unmerged work).
  """
  cb = self.CurrentBranch
  kill = []
  left = self._allrefs
  for name in left.keys():
    if name.startswith(R_HEADS):
      name = name[len(R_HEADS):]
      if cb is None or name != cb:
        kill.append(name)

  rev = self.GetRevisionId(left)
  if cb is not None \
          and not self._revlist(HEAD + '...' + rev) \
          and not self.IsDirty(consider_untracked=False):
    # Current branch is fully merged and clean: detach so it can be
    # considered for deletion too.
    self.work_git.DetachHead(HEAD)
    kill.append(cb)

  if kill:
    old = self.bare_git.GetHead()
    try:
      # Temporarily detach so `git branch -d` can remove merged branches.
      self.bare_git.DetachHead(rev)

      b = ['branch', '-d']
      b.extend(kill)
      b = GitCommand(self, b, bare=True,
                     capture_stdout=True,
                     capture_stderr=True)
      b.Wait()
    finally:
      # Always restore the original HEAD and re-read refs.
      if ID_RE.match(old):
        self.bare_git.DetachHead(old)
      else:
        self.bare_git.SetHead(old)
      left = self._allrefs

    for branch in kill:
      if (R_HEADS + branch) not in left:
        # At least one branch was deleted; drop stale published refs.
        self.CleanPublishedCache()
        break

  if cb and cb not in kill:
    kill.append(cb)
  kill.sort()

  kept = []
  for branch in kill:
    if R_HEADS + branch in left:
      # Branch survived deletion (not merged); report it to the caller.
      branch = self.GetBranch(branch)
      base = branch.LocalMerge
      if not base:
        base = rev
      kept.append(ReviewableBranch(self, branch, base))
  return kept
# Submodule Management ##
def GetRegisteredSubprojects(self):
  """Return all registered subprojects, recursively."""
  result = []

  def _collect(children):
    if not children:
      return
    result.extend(children)
    for child in children:
      _collect(child.subprojects)

  _collect(self.subprojects)
  return result
def _GetSubmodules(self):
  """Return [(rev, path, url), ...] for this project's submodules.

  Works entirely against the bare repository so a work tree is not
  required.
  """
  # Unfortunately we cannot call `git submodule status --recursive` here
  # because the working tree might not exist yet, and it cannot be used
  # without a working tree in its current implementation.

  def get_submodules(gitdir, rev):
    # Parse .gitmodules for submodule sub_paths and sub_urls
    sub_paths, sub_urls = parse_gitmodules(gitdir, rev)
    if not sub_paths:
      return []
    # Run `git ls-tree` to read SHAs of submodule object, which happen to be
    # revision of submodule repository
    sub_revs = git_ls_tree(gitdir, rev, sub_paths)
    submodules = []
    for sub_path, sub_url in zip(sub_paths, sub_urls):
      try:
        sub_rev = sub_revs[sub_path]
      except KeyError:
        # Ignore non-exist submodules
        continue
      submodules.append((sub_rev, sub_path, sub_url))
    return submodules

  re_path = re.compile(r'^submodule\.(.+)\.path=(.*)$')
  re_url = re.compile(r'^submodule\.(.+)\.url=(.*)$')

  def parse_gitmodules(gitdir, rev):
    # Extract .gitmodules at |rev| and parse it via `git config --list`.
    cmd = ['cat-file', 'blob', '%s:.gitmodules' % rev]
    try:
      p = GitCommand(None, cmd, capture_stdout=True, capture_stderr=True,
                     bare=True, gitdir=gitdir)
    except GitError:
      return [], []
    if p.Wait() != 0:
      return [], []

    gitmodules_lines = []
    fd, temp_gitmodules_path = tempfile.mkstemp()
    try:
      os.write(fd, p.stdout)
      os.close(fd)
      cmd = ['config', '--file', temp_gitmodules_path, '--list']
      p = GitCommand(None, cmd, capture_stdout=True, capture_stderr=True,
                     bare=True, gitdir=gitdir)
      if p.Wait() != 0:
        return [], []
      gitmodules_lines = p.stdout.split('\n')
    except GitError:
      return [], []
    finally:
      platform_utils.remove(temp_gitmodules_path)

    names = set()
    paths = {}
    urls = {}
    for line in gitmodules_lines:
      if not line:
        continue
      m = re_path.match(line)
      if m:
        names.add(m.group(1))
        paths[m.group(1)] = m.group(2)
        continue
      m = re_url.match(line)
      if m:
        names.add(m.group(1))
        urls[m.group(1)] = m.group(2)
        continue
    names = sorted(names)
    return ([paths.get(name, '') for name in names],
            [urls.get(name, '') for name in names])

  def git_ls_tree(gitdir, rev, paths):
    # Map each submodule path to the gitlink SHA recorded at |rev|.
    cmd = ['ls-tree', rev, '--']
    cmd.extend(paths)
    try:
      p = GitCommand(None, cmd, capture_stdout=True, capture_stderr=True,
                     bare=True, gitdir=gitdir)
    except GitError:
      return []
    if p.Wait() != 0:
      return []
    objects = {}
    for line in p.stdout.split('\n'):
      if not line.strip():
        continue
      # NOTE(review): split() breaks for paths containing whitespace —
      # assumes submodule paths have none; confirm before relying on it.
      object_rev, object_path = line.split()[2:4]
      objects[object_path] = object_rev
    return objects

  try:
    rev = self.GetRevisionId()
  except GitError:
    return []
  return get_submodules(self.gitdir, rev)
def GetDerivedSubprojects(self):
  """Return Project objects derived from this project's submodules."""
  result = []
  if not self.Exists:
    # If git repo does not exist yet, querying its submodules will
    # mess up its states; so return here.
    return result
  for rev, path, url in self._GetSubmodules():
    name = self.manifest.GetSubprojectName(self, path)
    relpath, worktree, gitdir, objdir = \
        self.manifest.GetSubprojectPaths(self, name, path)
    project = self.manifest.paths.get(relpath)
    if project:
      # Path already covered by a manifest project; recurse into it
      # rather than creating a derived duplicate.
      result.extend(project.GetDerivedSubprojects())
      continue

    if url.startswith('..'):
      # Relative submodule URL: resolve against this project's remote.
      url = urllib.parse.urljoin("%s/" % self.remote.url, url)
    remote = RemoteSpec(self.remote.name,
                        url=url,
                        pushUrl=self.remote.pushUrl,
                        review=self.remote.review,
                        revision=self.remote.revision)
    subproject = Project(manifest=self.manifest,
                         name=name,
                         remote=remote,
                         gitdir=gitdir,
                         objdir=objdir,
                         worktree=worktree,
                         relpath=relpath,
                         revisionExpr=rev,
                         revisionId=rev,
                         rebase=self.rebase,
                         groups=self.groups,
                         sync_c=self.sync_c,
                         sync_s=self.sync_s,
                         sync_tags=self.sync_tags,
                         parent=self,
                         is_derived=True)
    result.append(subproject)
    result.extend(subproject.GetDerivedSubprojects())
  return result
# Direct Git Commands ##
def _CheckForImmutableRevision(self):
  """Return True if revisionExpr (sha or tag) is already present locally."""
  try:
    # rev-parse --verify raises if the revision is unknown.
    self.bare_git.rev_parse('--verify', '%s^0' % self.revisionExpr)
  except GitError:
    # There is no such persistent revision. We have to fetch it.
    return False
  return True
def _FetchArchive(self, tarpath, cwd=None):
  """Run `git archive` against the remote, writing |tarpath|.

  Raises:
    GitError: if the archive command fails.
  """
  cmd = ['archive', '-v', '-o', tarpath,
         '--remote=%s' % self.remote.url,
         '--prefix=%s/' % self.relpath,
         self.revisionExpr]
  command = GitCommand(self, cmd, cwd=cwd,
                       capture_stdout=True,
                       capture_stderr=True)
  if command.Wait() != 0:
    raise GitError('git archive %s: %s' % (self.name, command.stderr))
def _RemoteFetch(self, name=None,
                 current_branch_only=False,
                 initial=False,
                 quiet=False,
                 alt_dir=None,
                 no_tags=False,
                 prune=False,
                 depth=None,
                 submodules=False,
                 force_sync=False):
  """Fetch from the remote, with retries and shallow/alternates handling.

  Returns:
    True on success, False otherwise.
  """
  is_sha1 = False
  tag_name = None
  # The depth should not be used when fetching to a mirror because
  # it will result in a shallow repository that cannot be cloned or
  # fetched from.
  # The repo project should also never be synced with partial depth.
  if self.manifest.IsMirror or self.relpath == '.repo/repo':
    depth = None

  if depth:
    current_branch_only = True

  if ID_RE.match(self.revisionExpr) is not None:
    is_sha1 = True

  if current_branch_only:
    if self.revisionExpr.startswith(R_TAGS):
      # this is a tag and its sha1 value should never change
      tag_name = self.revisionExpr[len(R_TAGS):]

    if is_sha1 or tag_name is not None:
      if self._CheckForImmutableRevision():
        if not quiet:
          print('Skipped fetching project %s (already have persistent ref)'
                % self.name)
        return True
    if is_sha1 and not depth:
      # When syncing a specific commit and --depth is not set:
      # * if upstream is explicitly specified and is not a sha1, fetch only
      # upstream as users expect only upstream to be fetch.
      # Note: The commit might not be in upstream in which case the sync
      # will fail.
      # * otherwise, fetch all branches to make sure we end up with the
      # specific commit.
      if self.upstream:
        current_branch_only = not ID_RE.match(self.upstream)
      else:
        current_branch_only = False

  if not name:
    name = self.remote.name

  ssh_proxy = False
  remote = self.GetRemote(name)
  if remote.PreConnectFetch():
    ssh_proxy = True

  if initial:
    # On the first fetch, temporarily expose the reference repository's
    # refs in packed-refs so git can avoid re-downloading shared objects.
    if alt_dir and 'objects' == os.path.basename(alt_dir):
      ref_dir = os.path.dirname(alt_dir)
      packed_refs = os.path.join(self.gitdir, 'packed-refs')
      remote = self.GetRemote(name)

      all_refs = self.bare_ref.all
      ids = set(all_refs.values())
      tmp = set()

      for r, ref_id in GitRefs(ref_dir).all.items():
        if r not in all_refs:
          if r.startswith(R_TAGS) or remote.WritesTo(r):
            all_refs[r] = ref_id
            ids.add(ref_id)
            continue

        if ref_id in ids:
          continue

        # Hide borrowed objects behind throwaway refs/_alt/ names.
        r = 'refs/_alt/%s' % ref_id
        all_refs[r] = ref_id
        ids.add(ref_id)
        tmp.add(r)

      tmp_packed_lines = []
      old_packed_lines = []

      for r in sorted(all_refs):
        line = '%s %s\n' % (all_refs[r], r)
        tmp_packed_lines.append(line)
        if r not in tmp:
          old_packed_lines.append(line)

      tmp_packed = ''.join(tmp_packed_lines)
      old_packed = ''.join(old_packed_lines)
      _lwrite(packed_refs, tmp_packed)
    else:
      alt_dir = None

  cmd = ['fetch']

  if depth:
    cmd.append('--depth=%s' % depth)
  else:
    # If this repo has shallow objects, then we don't know which refs have
    # shallow objects or not. Tell git to unshallow all fetched refs. Don't
    # do this with projects that don't have shallow objects, since it is less
    # efficient.
    if os.path.exists(os.path.join(self.gitdir, 'shallow')):
      cmd.append('--depth=2147483647')

  if quiet:
    cmd.append('--quiet')
  if not self.worktree:
    cmd.append('--update-head-ok')
  cmd.append(name)

  # If using depth then we should not get all the tags since they may
  # be outside of the depth.
  if no_tags or depth:
    cmd.append('--no-tags')
  else:
    cmd.append('--tags')
  if force_sync:
    cmd.append('--force')

  if prune:
    cmd.append('--prune')

  if submodules:
    cmd.append('--recurse-submodules=on-demand')

  spec = []
  if not current_branch_only:
    # Fetch whole repo
    spec.append(str((u'+refs/heads/*:') + remote.ToLocal('refs/heads/*')))
  elif tag_name is not None:
    spec.append('tag')
    spec.append(tag_name)

  if not self.manifest.IsMirror:
    branch = self.revisionExpr
    if is_sha1 and depth and git_require((1, 8, 3)):
      # Shallow checkout of a specific commit, fetch from that commit and not
      # the heads only as the commit might be deeper in the history.
      spec.append(branch)
    else:
      if is_sha1:
        branch = self.upstream
      if branch is not None and branch.strip():
        if not branch.startswith('refs/'):
          branch = R_HEADS + branch
        spec.append(str((u'+%s:' % branch) + remote.ToLocal(branch)))
  cmd.extend(spec)

  # Retry once; on some failures run `git remote prune` in between.
  ok = False
  for _i in range(2):
    gitcmd = GitCommand(self, cmd, bare=True, ssh_proxy=ssh_proxy)
    ret = gitcmd.Wait()
    if ret == 0:
      ok = True
      break
    # If needed, run the 'git remote prune' the first time through the loop
    elif (not _i and
          "error:" in gitcmd.stderr and
          "git remote prune" in gitcmd.stderr):
      prunecmd = GitCommand(self, ['remote', 'prune', name], bare=True,
                            ssh_proxy=ssh_proxy)
      ret = prunecmd.Wait()
      if ret:
        break
      continue
    elif current_branch_only and is_sha1 and ret == 128:
      # Exit code 128 means "couldn't find the ref you asked for"; if we're
      # in sha1 mode, we just tried sync'ing from the upstream field; it
      # doesn't exist, thus abort the optimization attempt and do a full sync.
      break
    elif ret < 0:
      # Git died with a signal, exit immediately
      break
    time.sleep(random.randint(30, 45))

  if initial:
    if alt_dir:
      # Restore the original packed-refs (drop the temporary _alt refs).
      if old_packed != '':
        _lwrite(packed_refs, old_packed)
      else:
        platform_utils.remove(packed_refs)
    self.bare_git.pack_refs('--all', '--prune')

  if is_sha1 and current_branch_only:
    # We just synced the upstream given branch; verify we
    # got what we wanted, else trigger a second run of all
    # refs.
    if not self._CheckForImmutableRevision():
      if current_branch_only and depth:
        # Sync the current branch only with depth set to None
        return self._RemoteFetch(name=name,
                                 current_branch_only=current_branch_only,
                                 initial=False, quiet=quiet, alt_dir=alt_dir,
                                 depth=None)
      else:
        # Avoid infinite recursion: sync all branches with depth set to None
        return self._RemoteFetch(name=name, current_branch_only=False,
                                 initial=False, quiet=quiet, alt_dir=alt_dir,
                                 depth=None)

  return ok
def _ApplyCloneBundle(self, initial=False, quiet=False):
  """Try to bootstrap the repo from a server-provided clone.bundle.

  Returns:
    True if a bundle was fetched and applied, False otherwise.
  """
  if initial and \
          (self.manifest.manifestProject.config.GetString('repo.depth') or
           self.clone_depth):
    # Bundles contain full history; skip when a shallow clone is wanted.
    return False

  remote = self.GetRemote(self.remote.name)
  bundle_url = remote.url + '/clone.bundle'
  bundle_url = GitConfig.ForUser().UrlInsteadOf(bundle_url)
  if GetSchemeFromUrl(bundle_url) not in ('http', 'https',
                                          'persistent-http',
                                          'persistent-https'):
    return False

  bundle_dst = os.path.join(self.gitdir, 'clone.bundle')
  bundle_tmp = os.path.join(self.gitdir, 'clone.bundle.tmp')

  exist_dst = os.path.exists(bundle_dst)
  exist_tmp = os.path.exists(bundle_tmp)

  if not initial and not exist_dst and not exist_tmp:
    return False

  if not exist_dst:
    exist_dst = self._FetchBundle(bundle_url, bundle_tmp, bundle_dst, quiet)
  if not exist_dst:
    return False

  cmd = ['fetch']
  if quiet:
    cmd.append('--quiet')
  if not self.worktree:
    cmd.append('--update-head-ok')
  cmd.append(bundle_dst)
  for f in remote.fetch:
    cmd.append(str(f))
  cmd.append('+refs/tags/*:refs/tags/*')

  ok = GitCommand(self, cmd, bare=True).Wait() == 0
  # Clean up bundle files regardless of the fetch outcome.
  if os.path.exists(bundle_dst):
    platform_utils.remove(bundle_dst)
  if os.path.exists(bundle_tmp):
    platform_utils.remove(bundle_tmp)
  return ok
def _FetchBundle(self, srcUrl, tmpPath, dstPath, quiet):
  """Download |srcUrl| via curl into |dstPath| (resumable via |tmpPath|).

  Returns:
    True if a valid bundle ended up at |dstPath|, False otherwise.
  """
  if os.path.exists(dstPath):
    platform_utils.remove(dstPath)

  cmd = ['curl', '--fail', '--output', tmpPath, '--netrc', '--location']
  if quiet:
    cmd += ['--silent']
  if os.path.exists(tmpPath):
    size = os.stat(tmpPath).st_size
    if size >= 1024:
      # Resume a previously interrupted download.
      cmd += ['--continue-at', '%d' % (size,)]
    else:
      # Too small to bother resuming; start over.
      platform_utils.remove(tmpPath)
  with GetUrlCookieFile(srcUrl, quiet) as (cookiefile, proxy):
    if cookiefile:
      cmd += ['--cookie', cookiefile, '--cookie-jar', cookiefile]
    if proxy:
      cmd += ['--proxy', proxy]
    elif 'http_proxy' in os.environ and 'darwin' == sys.platform:
      cmd += ['--proxy', os.environ['http_proxy']]
    if srcUrl.startswith('persistent-https'):
      srcUrl = 'http' + srcUrl[len('persistent-https'):]
    elif srcUrl.startswith('persistent-http'):
      srcUrl = 'http' + srcUrl[len('persistent-http'):]
    cmd += [srcUrl]

    if IsTrace():
      Trace('%s', ' '.join(cmd))
    try:
      proc = subprocess.Popen(cmd)
    except OSError:
      return False

    curlret = proc.wait()

    if curlret == 22:
      # From curl man page:
      # 22: HTTP page not retrieved. The requested url was not found or
      # returned another error with the HTTP error code being 400 or above.
      # This return code only appears if -f, --fail is used.
      if not quiet:
        print("Server does not provide clone.bundle; ignoring.",
              file=sys.stderr)
      return False

  if os.path.exists(tmpPath):
    if curlret == 0 and self._IsValidBundle(tmpPath, quiet):
      platform_utils.rename(tmpPath, dstPath)
      return True
    else:
      platform_utils.remove(tmpPath)
      return False
  else:
    return False
def _IsValidBundle(self, path, quiet):
try:
with open(path) as f:
if f.read(16) == '# v2 git bundle\n':
return True
else:
if not quiet:
print("Invalid clone.bundle file; ignoring.", file=sys.stderr)
return False
except OSError:
return False
def _Checkout(self, rev, quiet=False):
  """git checkout |rev|; raise GitError on failure when refs exist."""
  cmd = ['checkout'] + (['-q'] if quiet else []) + [rev, '--']
  if GitCommand(self, cmd).Wait() != 0:
    if self._allrefs:
      raise GitError('%s checkout %s ' % (self.name, rev))
def _CherryPick(self, rev):
  """git cherry-pick |rev|; raise GitError on failure when refs exist."""
  if GitCommand(self, ['cherry-pick', rev, '--']).Wait() != 0:
    if self._allrefs:
      raise GitError('%s cherry-pick %s ' % (self.name, rev))
def _LsRemote(self, refs):
  """Return `git ls-remote` output as text, or None on failure."""
  p = GitCommand(self, ['ls-remote', self.remote.name, refs],
                 capture_stdout=True)
  if p.Wait() != 0:
    return None
  out = p.stdout
  # Python 2 returns bytes here; decode only when needed.
  return out.decode('utf-8') if hasattr(out, 'decode') else out
def _Revert(self, rev):
  """git revert --no-edit |rev|; raise GitError on failure when refs exist."""
  if GitCommand(self, ['revert', '--no-edit', rev, '--']).Wait() != 0:
    if self._allrefs:
      raise GitError('%s revert %s ' % (self.name, rev))
def _ResetHard(self, rev, quiet=True):
  """Hard-reset the work tree to |rev|, raising GitError on failure."""
  cmd = ['reset', '--hard'] + (['-q'] if quiet else []) + [rev]
  if GitCommand(self, cmd).Wait() != 0:
    raise GitError('%s reset --hard %s ' % (self.name, rev))
def _SyncSubmodules(self, quiet=True):
  """Run `git submodule update --init --recursive`.

  Args:
    quiet: pass -q to git when True.

  Raises:
    GitError: if the submodule update fails.
  """
  cmd = ['submodule', 'update', '--init', '--recursive']
  if quiet:
    cmd.append('-q')
  if GitCommand(self, cmd).Wait() != 0:
    # Bug fix: the previous message used two %s placeholders with a single
    # (non-tuple) argument, so the failure path raised TypeError: "not
    # enough arguments for format string" instead of the intended GitError.
    raise GitError('%s submodule update --init --recursive' % self.name)
def _Rebase(self, upstream, onto=None):
  """git rebase from |upstream|, optionally --onto |onto|."""
  cmd = ['rebase']
  if onto is not None:
    cmd += ['--onto', onto]
  cmd.append(upstream)
  if GitCommand(self, cmd).Wait() != 0:
    raise GitError('%s rebase %s ' % (self.name, upstream))
def _FastForward(self, head, ffonly=False):
    """Merge |head| (fast-forward only when requested); raise GitError on failure."""
    args = ['merge', head] + (["--ff-only"] if ffonly else [])
    if GitCommand(self, args).Wait() != 0:
        raise GitError('%s merge %s ' % (self.name, head))
def _InitGitDir(self, mirror_git=None, force_sync=False):
    """Create/refresh this project's bare git directory and object store.

    Args:
      mirror_git: Optional path of a local mirror to use as an alternate
          object store.
      force_sync: If True, a mismatched gitdir is deleted and the
          initialization retried once.

    Raises:
      GitError: when the existing gitdir references the wrong objdir and
          force_sync is not set (or the retry also fails).

    On any failure, directories created by THIS call are removed so a
    partial init does not poison later runs.
    """
    # Remember what we created so the cleanup handler below only removes
    # directories this invocation is responsible for.
    init_git_dir = not os.path.exists(self.gitdir)
    init_obj_dir = not os.path.exists(self.objdir)
    try:
        # Initialize the bare repository, which contains all of the objects.
        if init_obj_dir:
            os.makedirs(self.objdir)
            self.bare_objdir.init()
        # If we have a separate directory to hold refs, initialize it as well.
        if self.objdir != self.gitdir:
            if init_git_dir:
                os.makedirs(self.gitdir)
            if init_obj_dir or init_git_dir:
                self._ReferenceGitDir(self.objdir, self.gitdir, share_refs=False,
                                      copy_all=True)
            try:
                self._CheckDirReference(self.objdir, self.gitdir, share_refs=False)
            except GitError as e:
                if force_sync:
                    print("Retrying clone after deleting %s" %
                          self.gitdir, file=sys.stderr)
                    try:
                        platform_utils.rmtree(platform_utils.realpath(self.gitdir))
                        if self.worktree and os.path.exists(
                                platform_utils.realpath(self.worktree)):
                            platform_utils.rmtree(platform_utils.realpath(self.worktree))
                        # Retry exactly once, without force, so we cannot loop.
                        return self._InitGitDir(mirror_git=mirror_git, force_sync=False)
                    except:  # noqa: E722 - deliberately re-raise the original GitError
                        raise e
                raise e
        if init_git_dir:
            mp = self.manifest.manifestProject
            ref_dir = mp.config.GetString('repo.reference') or ''
            # Prefer an explicit mirror, then a mirror named after this
            # project inside the reference dir, then a prior repo checkout.
            if ref_dir or mirror_git:
                if not mirror_git:
                    mirror_git = os.path.join(ref_dir, self.name + '.git')
                repo_git = os.path.join(ref_dir, '.repo', 'projects',
                                        self.relpath + '.git')
                if os.path.exists(mirror_git):
                    ref_dir = mirror_git
                elif os.path.exists(repo_git):
                    ref_dir = repo_git
                else:
                    ref_dir = None
                if ref_dir:
                    if not os.path.isabs(ref_dir):
                        # The alternate directory is relative to the object database.
                        ref_dir = os.path.relpath(ref_dir,
                                                  os.path.join(self.objdir, 'objects'))
                    # Borrow objects from the reference repository.
                    _lwrite(os.path.join(self.gitdir, 'objects/info/alternates'),
                            os.path.join(ref_dir, 'objects') + '\n')
            self._UpdateHooks()
            # Propagate the user's identity from the manifest project config.
            m = self.manifest.manifestProject.config
            for key in ['user.name', 'user.email']:
                if m.Has(key, include_defaults=False):
                    self.config.SetString(key, m.GetString(key))
            # Disable git-lfs content filters during sync.
            self.config.SetString('filter.lfs.smudge', 'git-lfs smudge --skip -- %f')
            self.config.SetString('filter.lfs.process', 'git-lfs filter-process --skip')
            if self.manifest.IsMirror:
                self.config.SetString('core.bare', 'true')
            else:
                self.config.SetString('core.bare', None)
    except Exception:
        # Roll back only what this call created, then re-raise.
        if init_obj_dir and os.path.exists(self.objdir):
            platform_utils.rmtree(self.objdir)
        if init_git_dir and os.path.exists(self.gitdir):
            platform_utils.rmtree(self.gitdir)
        raise
def _UpdateHooks(self):
    """Refresh hook links, but only if the git dir already exists."""
    if not os.path.exists(self.gitdir):
        return
    self._InitHooks()
def _InitHooks(self):
    """Symlink the stock repo hooks into this project's hooks directory.

    Locally modified hooks are left alone (with a warning); the Gerrit
    commit-msg hook is skipped for projects with no review server.

    Raises:
      GitError: when the platform refuses to create symlinks (EPERM).
    """
    hooks = platform_utils.realpath(self._gitdir_path('hooks'))
    if not os.path.exists(hooks):
        os.makedirs(hooks)
    for stock_hook in _ProjectHooks():
        name = os.path.basename(stock_hook)

        if name in ('commit-msg',) and not self.remote.review \
                and self is not self.manifest.manifestProject:
            # Don't install a Gerrit Code Review hook if this
            # project does not appear to use it for reviews.
            #
            # Since the manifest project is one of those, but also
            # managed through gerrit, it's excluded
            continue

        dst = os.path.join(hooks, name)
        if platform_utils.islink(dst):
            # Already a link (presumably to a stock hook); leave it.
            continue
        if os.path.exists(dst):
            if filecmp.cmp(stock_hook, dst, shallow=False):
                # Identical content but a regular file: replace with a link.
                platform_utils.remove(dst)
            else:
                _warn("%s: Not replacing locally modified %s hook",
                      self.relpath, name)
                continue
        try:
            platform_utils.symlink(
                os.path.relpath(stock_hook, os.path.dirname(dst)), dst)
        except OSError as e:
            if e.errno == errno.EPERM:
                raise GitError(self._get_symlink_error_message())
            else:
                raise
def _InitRemote(self):
    """Write this project's remote configuration into its git config."""
    if not self.remote.url:
        return
    remote = self.GetRemote(self.remote.name)
    remote.url = self.remote.url
    remote.pushUrl = self.remote.pushUrl
    remote.review = self.remote.review
    remote.projectname = self.name

    # Mirrors fetch refs verbatim; work trees use remote-tracking refs.
    remote.ResetFetch(mirror=not self.worktree)
    remote.Save()
def _InitMRef(self):
    """Initialize the m/<manifest-branch> ref, when a branch is set."""
    branch = self.manifest.branch
    if not branch:
        return
    self._InitAnyMRef(R_M + branch)
def _InitMirrorHead(self):
    """Point HEAD at the manifest-selected revision for mirror repos."""
    self._InitAnyMRef(HEAD)
def _InitAnyMRef(self, ref):
    """Point |ref| at the revision the manifest selects for this project.

    When the manifest pins an exact commit (revisionId), |ref| is detached
    at that commit; otherwise |ref| becomes a symref to the local tracking
    ref of the symbolic revision expression.
    """
    cur = self.bare_ref.symref(ref)

    if self.revisionId:
        # Pinned revision: detach |ref| unless it is already correct.
        if cur != '' or self.bare_ref.get(ref) != self.revisionId:
            msg = 'manifest set to %s' % self.revisionId
            dst = self.revisionId + '^0'
            self.bare_git.UpdateRef(ref, dst, message=msg, detach=True)
    else:
        # Symbolic revision: track the remote branch locally.
        remote = self.GetRemote(self.remote.name)
        dst = remote.ToLocal(self.revisionExpr)
        if cur != dst:
            msg = 'manifest set to %s' % self.revisionExpr
            self.bare_git.symbolic_ref('-m', msg, ref, dst)
def _CheckDirReference(self, srcdir, destdir, share_refs):
    """Verify that shared entries under |destdir| resolve into |srcdir|.

    Raises GitError if any shared file/dir points somewhere else, since
    overwriting it could destroy local work-tree metadata.
    """
    names = self.shareable_files[:] + self.shareable_dirs[:]
    if share_refs:
        names += self.working_tree_files + self.working_tree_dirs

    for name in set(names):
        dst = platform_utils.realpath(os.path.join(destdir, name))
        if not os.path.lexists(dst):
            continue
        src = platform_utils.realpath(os.path.join(srcdir, name))
        if src == dst:
            continue
        # The link points somewhere unexpected; refuse to clobber it.
        _error('%s is different in %s vs %s', name, destdir, srcdir)
        raise GitError('--force-sync not enabled; cannot overwrite a local '
                       'work tree. If you\'re comfortable with the '
                       'possibility of losing the work tree\'s git metadata,'
                       ' use `repo sync --force-sync {0}` to '
                       'proceed.'.format(self.relpath))
def _ReferenceGitDir(self, gitdir, dotgit, share_refs, copy_all):
    """Update |dotgit| to reference |gitdir|, using symlinks where possible.

    Args:
      gitdir: The bare git repository. Must already be initialized.
      dotgit: The repository you would like to initialize.
      share_refs: If true, |dotgit| will store its refs under |gitdir|.
          Only one work tree can store refs under a given |gitdir|.
      copy_all: If true, copy all remaining files from |gitdir| -> |dotgit|.
          This saves you the effort of initializing |dotgit| yourself.

    Raises:
      DownloadError: when symlink creation is not permitted (EPERM).
    """
    symlink_files = self.shareable_files[:]
    symlink_dirs = self.shareable_dirs[:]
    if share_refs:
        symlink_files += self.working_tree_files
        symlink_dirs += self.working_tree_dirs
    to_symlink = symlink_files + symlink_dirs

    to_copy = []
    if copy_all:
        to_copy = platform_utils.listdir(gitdir)

    dotgit = platform_utils.realpath(dotgit)
    for name in set(to_copy).union(to_symlink):
        try:
            src = platform_utils.realpath(os.path.join(gitdir, name))
            dst = os.path.join(dotgit, name)

            if os.path.lexists(dst):
                # Already present (file, dir, or link); leave it alone.
                continue

            # If the source dir doesn't exist, create an empty dir.
            if name in symlink_dirs and not os.path.lexists(src):
                os.makedirs(src)

            if name in to_symlink:
                platform_utils.symlink(
                    os.path.relpath(src, os.path.dirname(dst)), dst)
            elif copy_all and not platform_utils.islink(dst):
                if platform_utils.isdir(src):
                    shutil.copytree(src, dst)
                elif os.path.isfile(src):
                    shutil.copy(src, dst)

            # If the source file doesn't exist, ensure the destination
            # file doesn't either.
            if name in symlink_files and not os.path.lexists(src):
                try:
                    platform_utils.remove(dst)
                except OSError:
                    pass

        except OSError as e:
            if e.errno == errno.EPERM:
                # Typically Windows without symlink privilege.
                raise DownloadError(self._get_symlink_error_message())
            else:
                raise
def _InitWorkTree(self, force_sync=False, submodules=False):
    """Set up the work tree's .git directory, then check out HEAD.

    Args:
      force_sync: If True, a mismatched .git directory is removed and the
          initialization retried once (without force).
      submodules: If True, also sync submodules after the checkout.

    On any failure, a .git directory created by THIS call is removed before
    the exception propagates.
    """
    dotgit = os.path.join(self.worktree, '.git')
    init_dotgit = not os.path.exists(dotgit)
    try:
        if init_dotgit:
            os.makedirs(dotgit)
            self._ReferenceGitDir(self.gitdir, dotgit, share_refs=True,
                                  copy_all=False)

        try:
            self._CheckDirReference(self.gitdir, dotgit, share_refs=True)
        except GitError as e:
            if force_sync:
                try:
                    # Remove the stale .git and retry exactly once.
                    platform_utils.rmtree(dotgit)
                    return self._InitWorkTree(force_sync=False, submodules=submodules)
                except:  # noqa: E722 - re-raise the original GitError instead
                    raise e
            raise e

        if init_dotgit:
            _lwrite(os.path.join(dotgit, HEAD), '%s\n' % self.GetRevisionId())

            # Populate the work tree files from HEAD.
            cmd = ['read-tree', '--reset', '-u']
            cmd.append('-v')
            cmd.append(HEAD)
            if GitCommand(self, cmd).Wait() != 0:
                raise GitError("cannot initialize work tree for " + self.name)

            if submodules:
                self._SyncSubmodules(quiet=True)
            self._CopyAndLinkFiles()
    except Exception:
        if init_dotgit:
            platform_utils.rmtree(dotgit)
        raise
def _get_symlink_error_message(self):
    """Return a platform-appropriate explanation for a symlink failure."""
    if not platform_utils.isWindows():
        return 'filesystem must support symlinks'
    return ('Unable to create symbolic link. Please re-run the command as '
            'Administrator, or see '
            'https://github.com/git-for-windows/git/wiki/Symbolic-Links '
            'for other options.')
def _gitdir_path(self, path):
    """Return the resolved real path of |path| inside this project's gitdir."""
    return platform_utils.realpath(os.path.join(self.gitdir, path))
def _revlist(self, *args, **kw):
    """Run `git rev-list` in the work tree with |args| terminated by '--'."""
    argv = list(args) + ['--']
    return self.work_git.rev_list(*argv, **kw)
@property
def _allrefs(self):
    """All refs known to the bare repository (dict-like)."""
    return self.bare_ref.all
def _getLogs(self, rev1, rev2, oneline=False, color=True, pretty_format=None):
    """Get logs between two revisions of this project.

    Args:
      rev1: Base revision; if falsy, returns None without running git.
      rev2: Target revision; if falsy, logs for rev1 alone are returned.
      oneline: Pass --oneline to git log.
      color: Allow --color when the diff coloring config enables it.
      pretty_format: Optional --pretty=format: string.

    Returns:
      The raw `git log` output, or None when rev1 is falsy or git fails
      with a non-zero status.
    """
    comp = '..'
    if rev1:
        revs = [rev1]
        if rev2:
            revs.extend([comp, rev2])
        cmd = ['log', ''.join(revs)]
        out = DiffColoring(self.config)
        if out.is_on and color:
            cmd.append('--color')
        if pretty_format is not None:
            cmd.append('--pretty=format:%s' % pretty_format)
        if oneline:
            cmd.append('--oneline')

        try:
            log = GitCommand(self, cmd, capture_stdout=True, capture_stderr=True)
            if log.Wait() == 0:
                return log.stdout
        except GitError:
            # worktree may not exist if groups changed for example. In that case,
            # try in gitdir instead.
            if not os.path.exists(self.worktree):
                return self.bare_git.log(*cmd[1:])
            else:
                raise
    return None
def getAddedAndRemovedLogs(self, toProject, oneline=False, color=True,
                           pretty_format=None):
    """Get the list of logs from this revision to given revisionId"""
    selfId = self.GetRevisionId(self._allrefs)
    toId = toProject.GetRevisionId(toProject._allrefs)

    return {
        'added': self._getLogs(selfId, toId, oneline=oneline, color=color,
                               pretty_format=pretty_format),
        'removed': self._getLogs(toId, selfId, oneline=oneline, color=color,
                                 pretty_format=pretty_format),
    }
class _GitGetByExec(object):
    """Runs git subprocesses for a Project (bare repo or work tree).

    Unknown attribute accesses are turned into git commands via
    __getattr__, e.g. obj.rev_parse('HEAD') runs `git rev-parse HEAD`.
    """

    def __init__(self, project, bare, gitdir):
        self._project = project
        self._bare = bare          # run against the bare repository?
        self._gitdir = gitdir      # explicit gitdir passed to GitCommand

    def LsOthers(self):
        """Return untracked, non-ignored paths in the work tree."""
        p = GitCommand(self._project,
                       ['ls-files',
                        '-z',
                        '--others',
                        '--exclude-standard'],
                       bare=False,
                       gitdir=self._gitdir,
                       capture_stdout=True,
                       capture_stderr=True)
        if p.Wait() == 0:
            out = p.stdout
            if out:
                # Backslash is not anomalous
                return out[:-1].split('\0')
        return []

    def DiffZ(self, name, *args):
        """Run a NUL-delimited diff command (|name|) and parse its output.

        Returns a dict mapping path -> record with old/new mode, old/new
        id, status letter, similarity level, and src_path for R/C entries.
        """
        cmd = [name]
        cmd.append('-z')
        cmd.append('--ignore-submodules')
        cmd.extend(args)
        p = GitCommand(self._project,
                       cmd,
                       gitdir=self._gitdir,
                       bare=False,
                       capture_stdout=True,
                       capture_stderr=True)
        try:
            out = p.process.stdout.read()
            r = {}
            if out:
                # Records alternate: ":<info>" NUL "<path>" [NUL "<path2>"].
                out = iter(out[:-1].split('\0'))
                while out:
                    try:
                        info = next(out)
                        path = next(out)
                    except StopIteration:
                        break

                    class _Info(object):
                        # Parsed ":<omode> <nmode> <oid> <nid> <state>" record.
                        def __init__(self, path, omode, nmode, oid, nid, state):
                            self.path = path
                            self.src_path = None
                            self.old_mode = omode
                            self.new_mode = nmode
                            self.old_id = oid
                            self.new_id = nid

                            if len(state) == 1:
                                self.status = state
                                self.level = None
                            else:
                                # e.g. "R086": status letter plus similarity.
                                self.status = state[:1]
                                self.level = state[1:]
                                while self.level.startswith('0'):
                                    self.level = self.level[1:]

                    info = info[1:].split(' ')
                    info = _Info(path, *info)
                    if info.status in ('R', 'C'):
                        # Renames/copies carry a second path record.
                        info.src_path = info.path
                        info.path = next(out)
                    r[info.path] = info
            return r
        finally:
            p.Wait()

    def GetHead(self):
        """Return the current HEAD: a ref name or a detached commit id."""
        if self._bare:
            path = os.path.join(self._project.gitdir, HEAD)
        else:
            path = os.path.join(self._project.worktree, '.git', HEAD)
        try:
            fd = open(path)
        except IOError as e:
            raise NoManifestException(path, str(e))
        try:
            line = fd.readline()
        finally:
            fd.close()
        try:
            # Normalize bytes to text where readline returned bytes.
            line = line.decode()
        except AttributeError:
            pass
        if line.startswith('ref: '):
            return line[5:-1]
        return line[:-1]

    def SetHead(self, ref, message=None):
        """Point HEAD symbolically at |ref|."""
        cmdv = []
        if message is not None:
            cmdv.extend(['-m', message])
        cmdv.append(HEAD)
        cmdv.append(ref)
        self.symbolic_ref(*cmdv)

    def DetachHead(self, new, message=None):
        """Detach HEAD at commit |new|."""
        cmdv = ['--no-deref']
        if message is not None:
            cmdv.extend(['-m', message])
        cmdv.append(HEAD)
        cmdv.append(new)
        self.update_ref(*cmdv)

    def UpdateRef(self, name, new, old=None,
                  message=None,
                  detach=False):
        """Update ref |name| to |new|, optionally verifying |old| first."""
        cmdv = []
        if message is not None:
            cmdv.extend(['-m', message])
        if detach:
            cmdv.append('--no-deref')
        cmdv.append(name)
        cmdv.append(new)
        if old is not None:
            cmdv.append(old)
        self.update_ref(*cmdv)

    def DeleteRef(self, name, old=None):
        """Delete ref |name| and drop it from the cached ref state."""
        if not old:
            old = self.rev_parse(name)
        self.update_ref('-d', name, old)
        self._project.bare_ref.deleted(name)

    def rev_list(self, *args, **kw):
        """Run `git rev-list` (or `git log` when format= is given).

        Returns the output as a list of lines; raises GitError on failure.
        """
        if 'format' in kw:
            cmdv = ['log', '--pretty=format:%s' % kw['format']]
        else:
            cmdv = ['rev-list']
        cmdv.extend(args)
        p = GitCommand(self._project,
                       cmdv,
                       bare=self._bare,
                       gitdir=self._gitdir,
                       capture_stdout=True,
                       capture_stderr=True)
        r = []
        for line in p.process.stdout:
            # Strip one trailing newline per line.
            if line[-1] == '\n':
                line = line[:-1]
            r.append(line)
        if p.Wait() != 0:
            raise GitError('%s rev-list %s: %s' %
                           (self._project.name, str(args), p.stderr))
        return r

    def __getattr__(self, name):
        """Allow arbitrary git commands using pythonic syntax.

        This allows you to do things like:
          git_obj.rev_parse('HEAD')

        Since we don't have a 'rev_parse' method defined, the __getattr__
        will run. We'll replace the '_' with a '-' and try to run a git
        command. Any other positional arguments will be passed to the git
        command, and the following keyword arguments are supported:
          config: An optional dict of git config options to be passed
            with '-c'.

        Args:
          name: The name of the git command to call. Any '_' characters
            will be replaced with '-'.

        Returns:
          A callable object that will try to call git with the named
          command.
        """
        name = name.replace('_', '-')

        def runner(*args, **kwargs):
            cmdv = []
            config = kwargs.pop('config', None)
            # Any remaining keyword argument is unsupported.
            for k in kwargs:
                raise TypeError('%s() got an unexpected keyword argument %r'
                                % (name, k))
            if config is not None:
                if not git_require((1, 7, 2)):
                    raise ValueError('cannot set config on command line for %s()'
                                     % name)
                for k, v in config.items():
                    cmdv.append('-c')
                    cmdv.append('%s=%s' % (k, v))
            cmdv.append(name)
            cmdv.extend(args)
            p = GitCommand(self._project,
                           cmdv,
                           bare=self._bare,
                           gitdir=self._gitdir,
                           capture_stdout=True,
                           capture_stderr=True)
            if p.Wait() != 0:
                raise GitError('%s %s: %s' %
                               (self._project.name, name, p.stderr))
            r = p.stdout
            try:
                r = r.decode('utf-8')
            except AttributeError:
                pass
            # Strip a single trailing newline from one-line output.
            if r.endswith('\n') and r.index('\n') == len(r) - 1:
                return r[:-1]
            return r
        return runner
class _PriorSyncFailedError(Exception):
    """Signals that an earlier sync left this project mid-rebase."""
    _MESSAGE = 'prior sync failed; rebase still in progress'

    def __str__(self):
        return self._MESSAGE
class _DirtyError(Exception):
    """Signals that a project's work tree has uncommitted changes."""
    _MESSAGE = 'contains uncommitted changes'

    def __str__(self):
        return self._MESSAGE
class _InfoMessage(object):
    """An informational message queued for a project during sync."""

    def __init__(self, project, text):
        self.project = project
        self.text = text

    def Print(self, syncbuf):
        """Emit the message via the sync buffer's coloring output."""
        out = syncbuf.out
        out.info('%s/: %s', self.project.relpath, self.text)
        out.nl()
class _Failure(object):
    """A failed project paired with the reason it failed."""

    def __init__(self, project, why):
        self.project = project
        self.why = why

    def Print(self, syncbuf):
        """Emit the failure via the sync buffer's coloring output."""
        out = syncbuf.out
        out.fail('error: %s/: %s',
                 self.project.relpath,
                 str(self.why))
        out.nl()
class _Later(object):
    """A deferred per-project action to run after fetching completes."""

    def __init__(self, project, action):
        self.project = project
        self.action = action

    def Run(self, syncbuf):
        """Run the action; return True on success, False on GitError."""
        out = syncbuf.out
        out.project('project %s/', self.project.relpath)
        out.nl()
        try:
            self.action()
            out.nl()
            return True
        except GitError:
            out.nl()
            return False
class _SyncColoring(Coloring):
    """Coloring scheme for `repo sync` output (config section 'reposync')."""

    def __init__(self, config):
        Coloring.__init__(self, config, 'reposync')
        # Printers for project headers, info lines, and failures.
        self.project = self.printer('header', attr='bold')
        self.info = self.printer('info')
        self.fail = self.printer('fail', fg='red')
class SyncBuffer(object):
    """Collects messages and deferred actions during `repo sync`.

    Messages and failures are buffered and printed together; the two
    "later" queues hold checkout actions to run after fetching finishes.
    """

    def __init__(self, config, detach_head=False):
        self._messages = []
        self._failures = []
        self._later_queue1 = []
        self._later_queue2 = []

        self.out = _SyncColoring(config)
        self.out.redirect(sys.stderr)

        self.detach_head = detach_head
        # clean: no failure in the whole sync; recent_clean: since last
        # call to Recently().
        self.clean = True
        self.recent_clean = True

    def info(self, project, fmt, *args):
        """Queue an informational message for |project|."""
        self._messages.append(_InfoMessage(project, fmt % args))

    def fail(self, project, err=None):
        """Record a failure for |project|; marks the sync unclean."""
        self._failures.append(_Failure(project, err))
        self._MarkUnclean()

    def later1(self, project, what):
        """Queue |what| to run in the first deferred phase."""
        self._later_queue1.append(_Later(project, what))

    def later2(self, project, what):
        """Queue |what| to run in the second deferred phase."""
        self._later_queue2.append(_Later(project, what))

    def Finish(self):
        """Print buffered output, run deferred actions, return cleanliness."""
        self._PrintMessages()
        self._RunLater()
        self._PrintMessages()
        return self.clean

    def Recently(self):
        """Return (and reset) whether anything failed since the last call."""
        recent_clean = self.recent_clean
        self.recent_clean = True
        return recent_clean

    def _MarkUnclean(self):
        self.clean = False
        self.recent_clean = False

    def _RunLater(self):
        # Queue 2 only runs if queue 1 completed without failure.
        for q in ['_later_queue1', '_later_queue2']:
            if not self._RunQueue(q):
                return

    def _RunQueue(self, queue):
        """Run a deferred queue in order; stop at the first failure."""
        for m in getattr(self, queue):
            if not m.Run(self):
                self._MarkUnclean()
                return False
        setattr(self, queue, [])
        return True

    def _PrintMessages(self):
        for m in self._messages:
            m.Print(self)
        for m in self._failures:
            m.Print(self)

        self._messages = []
        self._failures = []
class MetaProject(Project):
    """A special project housed under .repo.
    """

    def __init__(self, manifest, name, gitdir, worktree):
        # Meta projects keep objects inside the gitdir itself
        # (objdir == gitdir) and track 'origin' refs/heads/master.
        Project.__init__(self,
                         manifest=manifest,
                         name=name,
                         gitdir=gitdir,
                         objdir=gitdir,
                         worktree=worktree,
                         remote=RemoteSpec('origin'),
                         relpath='.repo/%s' % name,
                         revisionExpr='refs/heads/master',
                         revisionId=None,
                         groups=None)

    def PreSync(self):
        """Before syncing, follow the checked-out branch's upstream
        instead of the hard-coded default revision."""
        if self.Exists:
            cb = self.CurrentBranch
            if cb:
                base = self.GetBranch(cb).merge
                if base:
                    self.revisionExpr = base
                    self.revisionId = None

    def MetaBranchSwitch(self, submodules=False):
        """ Prepare MetaProject for manifest branch switch
        """
        # detach and delete manifest branch, allowing a new
        # branch to take over
        syncbuf = SyncBuffer(self.config, detach_head=True)
        self.Sync_LocalHalf(syncbuf, submodules=submodules)
        syncbuf.Finish()

        return GitCommand(self,
                          ['update-ref', '-d', 'refs/heads/default'],
                          capture_stdout=True,
                          capture_stderr=True).Wait() == 0

    @property
    def LastFetch(self):
        """Modification time of FETCH_HEAD, or 0 if never fetched."""
        try:
            fh = os.path.join(self.gitdir, 'FETCH_HEAD')
            return os.path.getmtime(fh)
        except OSError:
            return 0

    @property
    def HasChanges(self):
        """Has the remote received new commits not yet checked out?
        """
        if not self.remote or not self.revisionExpr:
            return False

        all_refs = self.bare_ref.all
        revid = self.GetRevisionId(all_refs)
        head = self.work_git.GetHead()
        if head.startswith(R_HEADS):
            # Resolve a local branch name to a commit id.
            try:
                head = all_refs[head]
            except KeyError:
                head = None

        if revid == head:
            return False
        elif self._revlist(not_rev(HEAD), revid):
            # Commits reachable from the manifest revision but not HEAD.
            return True
        return False
| StarcoderdataPython |
8035243 | <gh_stars>0
import nose.tools
from nose import with_setup
import sys
import numpy as np
import datetime
# my module
from nwispy import nwispy_helpers as helpers
# Global fixture dict holding the data that goes into the functions under
# test; populated by setup() with the keys "dates", "values",
# "shorter_dates" and "longer_dates".
fixture = {}
def setup():
    """Setup fixture for testing: build the shared date/value arrays.

    Fixed for Python 3 compatibility: the original used the Python-2-only
    `print >> sys.stderr` statement and `01`-style octal-looking integer
    literals, both of which are SyntaxErrors on Python 3.
    """
    sys.stderr.write("SETUP: helpers tests\n")

    fixture["dates"] = np.array(
        [datetime.datetime(2014, 1, 1, 0, 0) + datetime.timedelta(i) for i in range(11)])
    fixture["values"] = np.array(list(range(11)))
    fixture["shorter_dates"] = np.array(
        [datetime.datetime(2014, 1, 3, 0, 0) + datetime.timedelta(i) for i in range(11)])
    fixture["longer_dates"] = np.array(
        [datetime.datetime(2013, 12, 1, 0, 0) + datetime.timedelta(i) for i in range(180)])
def teardown():
    """Print to standard error when all tests are finished.

    Fixed for Python 3 compatibility: replaces the Python-2-only
    `print >> sys.stderr` statement with an explicit stderr write.
    """
    sys.stderr.write("TEARDOWN: helpers tests\n")
def test_isfloat():
    """isfloat() accepts numeric values/strings and rejects non-numeric ones."""
    cases = [(6.25, True), ("6.25", True), ("2.5_", False), ("hello", False)]
    for value, expected in cases:
        nose.tools.assert_equals(expected, helpers.isfloat(value))
def test_convert_to_float():
    """convert_to_float() parses numeric strings; otherwise yields NaN."""
    msg = "My help message"
    nose.tools.assert_equals(6.25, helpers.convert_to_float("6.25", helper_str=msg))
    nose.tools.assert_equals(2.5, helpers.convert_to_float("2.5_", helper_str=msg))
    for bad in ("", "hello"):
        nose.tools.assert_almost_equals(
            np.array(np.nan).all(),
            np.array(helpers.convert_to_float(bad, helper_str=msg)).all())
def test_rmspecialchars():
    """rmspecialchars() strips special characters, keeping digits/dots/signs."""
    cases = [("*6.5_", "6.5"), ("*$^**(@4.25_+;", "4.25"), ("-4.1", "-4.1")]
    for raw, cleaned in cases:
        nose.tools.assert_equals(cleaned, helpers.rmspecialchars(raw))
def test_create_monthly_dict():
    """create_monthly_dict() returns an empty list for every month.

    Bug fix: the original listed "March" in the expected dict but never
    asserted it; iterating over all twelve months ensures no key is
    silently skipped.
    """
    months = ["January", "February", "March", "April", "May", "June",
              "July", "August", "September", "October", "November",
              "December"]
    expected = {month: [] for month in months}

    actual = helpers.create_monthly_dict()

    nose.tools.assert_equals(len(expected.keys()), len(actual.keys()))
    for month in months:
        nose.tools.assert_equals(expected[month], actual[month])
def test_subset_data_dates_within_range():
    """subset_data() keeps only entries between start and end (inclusive).

    Fixes two defects from the original:
    - `01`/`04`-style octal-looking literals are SyntaxErrors on Python 3.
    - `assert_equals(a.all(), b.all())` compares two reduced booleans, not
      the arrays, so it passes for almost any result; np.array_equal does
      a real element-wise comparison.
    """
    start = datetime.datetime(2014, 1, 4)
    end = datetime.datetime(2014, 1, 10)

    expected_dates = np.array([datetime.datetime(2014, 1, d, 0, 0)
                               for d in range(4, 11)])
    expected_values = np.array([3, 4, 5, 6, 7, 8, 9])

    actual_dates, actual_values = helpers.subset_data(dates=fixture["dates"],
                                                      values=fixture["values"],
                                                      start_date=start,
                                                      end_date=end)

    nose.tools.assert_true(np.array_equal(actual_dates, expected_dates))
    nose.tools.assert_true(np.array_equal(actual_values, expected_values))
def test_subset_data_dates_outside_range():
    """subset_data() clamps to available data when the range is wider.

    Fixes two defects from the original:
    - `01`-style octal-looking literals are SyntaxErrors on Python 3.
    - `assert_equals(a.all(), b.all())` only compares two reduced booleans;
      np.array_equal does a real element-wise comparison.
    """
    start = datetime.datetime(2013, 12, 1)
    end = datetime.datetime(2014, 1, 20)

    expected_dates = np.array([datetime.datetime(2014, 1, d, 0, 0)
                               for d in range(1, 12)])
    expected_values = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])

    actual_dates, actual_values = helpers.subset_data(dates=fixture["dates"],
                                                      values=fixture["values"],
                                                      start_date=start,
                                                      end_date=end)

    nose.tools.assert_true(np.array_equal(actual_dates, expected_dates))
    nose.tools.assert_true(np.array_equal(actual_values, expected_values))
def test_find_start_end_dates_shorter_range():
    """Overlap starts at the later start date and ends at the later end date.

    Fixed for Python 3: `01`/`03`-style octal-looking integer literals are
    SyntaxErrors on Python 3; use plain decimal literals.
    """
    expected_start_date = datetime.datetime(2014, 1, 3, 0, 0)
    expected_end_date = datetime.datetime(2014, 1, 11, 0, 0)

    actual_start_date, actual_end_date = helpers.find_start_end_dates(
        fixture["dates"], fixture["shorter_dates"])

    nose.tools.assert_equals(actual_start_date, expected_start_date)
    nose.tools.assert_equals(actual_end_date, expected_end_date)
def test_find_start_end_dates_longer_range():
    """A wider second range is clamped to the first range's span.

    Fixed for Python 3: `01`-style octal-looking integer literals are
    SyntaxErrors on Python 3; use plain decimal literals.
    """
    expected_start_date = datetime.datetime(2014, 1, 1, 0, 0)
    expected_end_date = datetime.datetime(2014, 1, 11, 0, 0)

    actual_start_date, actual_end_date = helpers.find_start_end_dates(
        fixture["dates"], fixture["longer_dates"])

    nose.tools.assert_equals(actual_start_date, expected_start_date)
    nose.tools.assert_equals(actual_end_date, expected_end_date)
| StarcoderdataPython |
6468645 | import argparse
import os
import sys
from junit_xml_parser import (
validate_junit_xml_file,
validate_junit_xml_archive,
parse_test_result
)
from report_data_storage import KustoConnector
def _run_script():
    """Parse command-line arguments and upload JUnit XML results to Kusto.

    Accepts either a single XML file or a directory of XML files, validates
    them, parses them into a JSON report, and uploads the report (tagged
    with an optional external tracking ID) to the named Kusto database.
    Exits with status 1 when the given path does not exist.
    """
    parser = argparse.ArgumentParser(
        description="Upload test reports to Kusto.",
        formatter_class=argparse.RawTextHelpFormatter,
        epilog="""
Examples:
python3 report_uploader.py tests/files/sample_tr.xml -e TRACKING_ID#22
""",
    )
    parser.add_argument("path_name", metavar="path", type=str, help="A file/directory to upload.")
    parser.add_argument("db_name", metavar="database", type=str, help="The Kusto DB to upload to.")
    parser.add_argument(
        "--external_id", "-e", type=str, help="An external tracking ID to append to the report.",
    )
    args = parser.parse_args()

    path = args.path_name
    if not os.path.exists(path):
        print(f"{path} not found")
        sys.exit(1)

    # FIXME: This interface is actually really clunky, should just have one method and check file
    # v. dir internally. Fix in the next PR.
    if os.path.isfile(path):
        # Single XML file: wrap in a list for uniform downstream handling.
        roots = [validate_junit_xml_file(path)]
    else:
        roots = validate_junit_xml_archive(path)

    test_result_json = parse_test_result(roots)
    # Default to an empty tracking ID when none was supplied.
    tracking_id = args.external_id if args.external_id else ""
    kusto_db = KustoConnector(args.db_name)
    kusto_db.upload_report(test_result_json, tracking_id)
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    _run_script()
| StarcoderdataPython |
9723277 | <filename>gimmemotifs/cli.py
#!/usr/bin/env python
# Copyright (c) 2013-2019 <NAME> <<EMAIL>>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
import os
import sys
import argparse
from gimmemotifs.config import MotifConfig, BG_TYPES, BED_VALID_BGS
from gimmemotifs import commands, __version__
from gimmemotifs.utils import check_genome
def cli(sys_args):
config = MotifConfig()
params = config.get_default_params()
default_pfm_file = os.path.join(config.get_motif_dir(), params["motif_db"])
default_pfm = params["motif_db"]
description = """
GimmeMotifs v{0}
""".format(
__version__
)
epilog = """
commands:
motifs identify enriched motifs (known and/or de novo)
scan scan for known motifs
maelstrom find differential motifs
match find motif matches in database
logo create sequence logo(s)
cluster cluster similar motifs
background create a background file
threshold calculate motif scan threshold
location motif location histograms
diff compare motif frequency and enrichment
between fasta files
motif2factors generate a motif database based on orthology for any
species
type `gimme <command> -h` for more details
"""
usage = "%(prog)s [-h] <subcommand> [options]"
parser = argparse.ArgumentParser(
usage=usage,
description=description,
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"-v", "--version", action="version", version=f"GimmeMotifs v{__version__}"
)
subparsers = parser.add_subparsers() # title='subcommands', metavar="<command>")
# gimme_motifs.py
p = subparsers.add_parser("motifs")
p.add_argument(
"sample", help="FASTA, BED, narrowPeak or region file.", metavar="INPUT"
)
p.add_argument("outdir", metavar="OUTDIR", help="Output directory.")
p.add_argument(
"-b",
"--background",
help=(
"Background type ({}) or a file with background "
"sequences (FASTA, BED or regions)"
).format(",".join(BED_VALID_BGS)),
metavar="BACKGROUND",
)
p.add_argument(
"-g", dest="genome", help="Genome name or fasta file", metavar="GENOME"
)
p.add_argument(
"--denovo",
dest="known",
help="Only use de novo motifs",
default=True,
action="store_false",
)
p.add_argument(
"--known",
dest="denovo",
help="Only use known motifs",
default=True,
action="store_false",
)
p.add_argument(
"--noreport",
dest="report",
help="Don't create a HTML report.",
default=True,
action="store_false",
)
p.add_argument(
"--rawscore",
dest="zscore",
help="Don't z-score normalize motif scores",
action="store_false",
default=True,
)
p.add_argument(
"--nogc",
dest="gc",
help="Don't use GC%% bins",
action="store_false",
default=True,
)
p.add_argument(
"-N",
"--nthreads",
dest="ncpus",
help="Number of threads (default %s)" % (params["ncpus"]),
metavar="INT",
type=int,
default=int(params["ncpus"]),
)
# Specific arguments for known motifs
known_grp = p.add_argument_group(title="optional arguments for known motifs")
known_grp.add_argument(
"-p",
dest="pfmfile",
help="PFM file with motifs." "(default: {0})".format(default_pfm),
default=default_pfm_file,
metavar="PFMFILE",
)
# Specific arguments for de novo motifs
denovo_grp = p.add_argument_group(title="optional arguments for de novo motifs")
denovo_grp.add_argument(
"-t",
"--tools",
dest="tools",
help="Tools to use, any combination of %s (default %s)"
% (params["available_tools"], params["tools"]),
metavar="N",
default="MEME,Homer,BioProspector",
)
denovo_grp.add_argument(
"-a",
"--analysis",
dest="analysis",
help="Analysis type: small, medium, large, xl (xl)",
metavar="ANALYSIS",
default="xl",
)
denovo_grp.add_argument(
"-k",
"--keepintermediate",
dest="keep_intermediate",
help="Don't delete intermediate files",
default=False,
action="store_true",
)
denovo_grp.add_argument(
"-S",
"--singlestrand",
dest="single",
help="Only predict motifs for single + strand (default is both)",
action="store_true",
default=False,
)
denovo_grp.add_argument(
"-f",
"--fraction",
dest="fraction",
help="Fraction of peaks to use for motif predicton (%s)" % params["fraction"],
metavar="FRACTION",
default=params["fraction"],
type=float,
)
denovo_grp.add_argument(
"-s",
"--size",
dest="size",
help=(
"Region size to use for motif prediction ({}). "
"Set to 0 to use the size of the input regions."
).format(params["size"]),
metavar="N",
default=params["size"],
type=int,
)
p.set_defaults(func=commands.motifs)
# pfmscan.py
NREPORT = 1
p = subparsers.add_parser("scan")
p.add_argument(
"inputfile", help="inputfile (FASTA, BED, regions)", metavar="INPUTFILE"
)
p.add_argument(
"-g", "--genome", dest="genome", help="Genome", metavar="GENOME", default=None
)
p.add_argument(
"-p",
"--pfmfile",
dest="pfmfile",
help="PFM file with motifs " "(default: {0})".format(default_pfm),
default=default_pfm_file,
metavar="pfmfile",
)
p.add_argument(
"-f",
"--fpr",
dest="fpr",
help="FPR for motif scanning (default 0.01)",
metavar="",
default=None,
)
p.add_argument(
"-B",
"--bgfile",
dest="bgfile",
help="background file for threshold",
metavar="",
default=None,
)
p.add_argument(
"-c",
"--cutoff",
dest="cutoff",
help="motif score cutoff or file with cutoffs",
metavar="",
default=None,
)
p.add_argument(
"-n",
"--nreport",
dest="nreport",
help="report the N best matches",
metavar="N",
default=NREPORT,
type=int,
)
p.add_argument(
"-r",
"--norc",
dest="scan_rc",
help="don't scan reverse complement (- strand)",
default=True,
action="store_false",
)
p.add_argument(
"-b",
"--bed",
action="store_true",
dest="bed",
default=False,
help="output bed format",
)
p.add_argument(
"-t",
"--table",
dest="table",
help="output counts in tabular format",
action="store_true",
default=False,
)
p.add_argument(
"-T",
"--score_table",
dest="score_table",
help="output maximum score in tabular format",
action="store_true",
default=False,
)
p.add_argument(
"-z",
"--zscore",
dest="zscore",
help="convert pfm logodds score to z-score",
action="store_true",
default=False,
)
p.add_argument(
"--gc",
dest="gcnorm",
help="use GC frequency normalized z-score",
action="store_true",
default=False,
)
p.add_argument(
"-N",
"--nthreads",
dest="ncpus",
help="Number of threads (default %s)" % (params["ncpus"]),
metavar="INT",
type=int,
default=int(params["ncpus"]),
)
p.add_argument(
"-M",
"--do_MOODS",
dest="moods",
help=argparse.SUPPRESS,
# help="experimental: use MOODS for scanning",
action="store_true",
default=False,
)
p.add_argument(
"-P",
"--pvalue",
dest="pvalue",
help=argparse.SUPPRESS,
# help="experimental: MOODS p-value cutoff",
metavar="",
type=float,
default=None,
)
p.set_defaults(func=commands.pfmscan)
# --- `maelstrom` subcommand: differential motif analysis over clustered regions ---
p = subparsers.add_parser("maelstrom")
p.add_argument(
    "inputfile", help="file with regions and clusters", metavar="INPUTFILE"
)
p.add_argument("genome", help="genome", metavar="GENOME")
p.add_argument("outdir", help="output directory", metavar="DIR")
p.add_argument(
    "-p",
    "--pfmfile",
    dest="pfmfile",
    help="PFM file with motifs " "(default: {0})".format(default_pfm),
    default=default_pfm_file,
    metavar="pfmfile",
)
# Negative flags below store False into a True-by-default dest.
p.add_argument(
    "--no-filter",
    dest="filter_redundant",
    help="Don't remove redundant motifs.",
    default=True,
    action="store_false",
)
p.add_argument(
    "-F",
    "--filter_cutoff",
    dest="filter_cutoff",
    help="Cutoff to select non-redundant motifs. Default is 0.8, increase this value to get fewer motifs.",
    default=0.8,
    type=float,
    metavar="FLOAT",
)
p.add_argument(
    "--nocenter",
    dest="center",
    help="Don't mean-center the rows by default",
    default=True,
    action="store_false",
)
p.add_argument(
    "-m",
    "--methods",
    dest="methods",
    help="Run with specific methods",
    default=None,
    metavar="NAMES",
)
p.add_argument(
    "-a",
    "--aggregation",
    dest="aggregation",
    help=(
        'How to combine motifs from individual methods. Default is "int_stouffer", '
        "for inverse normal transform of ranks, followed by Stouffer's method to combine "
        'z-scores. Alternatively, specify "stuart" for log-transformed rank aggregation '
        "p-values."
    ),
    default="int_stouffer",
    metavar="method",
)
p.add_argument(
    "-N",
    "--nthreads",
    dest="ncpus",
    help="Number of threads (default %s)" % (params["ncpus"]),
    metavar="INT",
    type=int,
    default=int(params["ncpus"]),
)
p.add_argument(
    "--rawscore",
    dest="zscore",
    help="Don't z-score normalize motif scores",
    action="store_false",
    default=True,
)
p.add_argument(
    "--nogc",
    dest="gc",
    help="Don't use GC%% bins",  # %% escapes the literal percent sign for argparse
    action="store_false",
    default=True,
)
p.set_defaults(func=commands.maelstrom)
# --- `match` subcommand: find closest matching motifs in a database ---
# closest_match.py
p = subparsers.add_parser("match")
p.add_argument("pfmfile", help="File with pfms", metavar="pfmfile")
p.add_argument(
    "-d",
    dest="dbpfmfile",
    help="File with pfms to match against " "(default: {0})".format(default_pfm),
    default=default_pfm_file,
    metavar="DBFILE",
)
p.add_argument(
    "-n",
    dest="nmatches",
    help="Number of matches to return (default 1)",
    default=1,
    metavar="INT",
    type=int,
)
p.add_argument(
    "-o",
    dest="img",
    help="Output file with graphical report (png, svg, ps, pdf)",
    metavar="FILE",
)
p.set_defaults(func=commands.match)
# --- `logo` subcommand: render sequence logos for motifs ---
# pwm2logo.py
p = subparsers.add_parser("logo")
p.add_argument(
    "-p", "--pfmfile", help="PFM file with motifs", metavar="pfmfile", default=None
)
p.add_argument(
    "-i",
    "--ids",
    dest="ids",
    help="Comma-separated list of motif ids (default is all ids)",
    metavar="IDS",
)
p.add_argument(
    "-k",
    "--kind",
    dest="kind",
    help="Type of motif (information, frequency, energy or ensembl)",
    metavar="TYPE",
    default="information",
)
p.add_argument(
    "--notitle",
    dest="title",
    help="Don't include motif ID as title",
    default=True,
    action="store_false",  # flag turns the title off
)
p.set_defaults(func=commands.logo)
# --- `cluster` subcommand: cluster similar motifs ---
# motif_cluster.py
p = subparsers.add_parser("cluster")
p.add_argument("inputfile", help="Inputfile (PFM format)", metavar="INPUTFILE")
p.add_argument("outdir", help="Name of output directory", metavar="OUTDIR")
p.add_argument(
    "-s",
    dest="single",
    help="Don't compare reverse complements of motifs",
    default=False,
    action="store_true",
)
p.add_argument(
    "-t", dest="threshold", help="Cluster threshold", default=0.95, type=float
)
p.add_argument(
    "-N",
    "--nthreads",
    dest="ncpus",
    help="Number of threads (default %s)" % (params["ncpus"]),
    metavar="INT",
    type=int,
    default=int(params["ncpus"]),
)
p.set_defaults(func=commands.cluster)
# --- `background` subcommand: generate background sequence sets ---
# generate_background_sequences.py
p = subparsers.add_parser("background")
p.add_argument("outputfile", help="outputfile", metavar="FILE")
p.add_argument(
    "bg_type",
    help="type of background sequences to generate (%s)" % ",".join(BG_TYPES),
    metavar="TYPE",
)
p.add_argument(
    "-i", dest="inputfile", help="input sequences (BED or FASTA)", metavar="FILE"
)
p.add_argument(
    "-f",
    dest="outformat",
    help="output format (BED or FASTA)",  # fixed: closing parenthesis was missing
    metavar="TYPE",
    default="fasta",
)
p.add_argument(
    "-s", dest="size", help="size of random sequences", metavar="INT", type=int
)
p.add_argument(
    "-n",
    dest="number",
    help="number of sequence to generate",
    metavar="NUMBER",
    default=10,
    type=int,
)
p.add_argument(
    "-g",
    dest="genome",
    help="genome version (not for type 'random')",
    metavar="GENOME",
)
p.add_argument(
    "-m",
    dest="markov_order",
    help="order of the Markov model (only for type 'random', default 1)",
    metavar="N",
    default=1,
    type=int,
)
p.set_defaults(func=commands.background)
# --- `threshold` subcommand: derive score cutoffs from a desired FPR ---
# get_fpr_based_pfmscan_threshold.py
p = subparsers.add_parser("threshold")
p.add_argument("pfmfile", help="File with pfms", metavar="pfmfile")
p.add_argument(
    "inputfile", help="FASTA file with background sequences", metavar="FAFILE"
)
p.add_argument("fpr", help="Desired fpr", type=float, metavar="FPR")
p.set_defaults(func=commands.threshold)

# --- `location` subcommand: plot positional motif preference ---
# motif_localization_plots.py
p = subparsers.add_parser("location")
p.add_argument("pfmfile", help="File with pfms", metavar="pfmfile")
p.add_argument("fastafile", help="Fasta formatted file", metavar="FAFILE")
p.add_argument(
    "-s",
    dest="size",
    help="Set size to W (default: determined from fastafile)",
    metavar="INT",
    type=int,
)
p.add_argument(
    "-i",
    dest="ids",
    help="Comma-separated list of motif ids to plot (default is all ids)",
    metavar="IDS",
)
p.add_argument(
    "-c",
    dest="cutoff",
    help="Cutoff for motif scanning (default 0.95)",
    type=float,
    default=0.95,
)
p.set_defaults(func=commands.location)
# --- `diff` subcommand: compare motif frequencies between datasets ---
p = subparsers.add_parser("diff")
p.add_argument(
    "inputfiles",
    help=(
        "FASTA-formatted inputfiles OR a BED file with an identifier in the 4th "
        "column, for instance a cluster number."
    ),
    metavar="FAFILES",
)
p.add_argument("bgfile", help="FASTA-formatted background file", metavar="BGFAFILE")
p.add_argument("outputfile", help="outputfile (image)", metavar="PNGFILE")
p.add_argument(
    "-p",
    "--pfmfile",
    dest="pfmfile",
    help="PFM file with motifs " "(default: {0})".format(default_pfm),
    default=default_pfm_file,
    metavar="pfmfile",
)
p.add_argument(
    "-c",
    "--cutoff",
    dest="cutoff",
    help="motif score cutoff or file with cutoffs (default 0.9)",
    metavar="",
    default=0.9,
)
p.add_argument(
    "-e",
    "--enrichment",
    dest="minenr",
    help="minimum enrichment in at least one of the datasets compared to background",
    metavar="MINENR",
    type=float,
    default=3,
)
p.add_argument(
    "-f",
    "--frequency",
    dest="minfreq",
    help="minimum frequency in at least one of the datasets",
    metavar="MINFREQ",
    type=float,
    default=0.01,
)
p.add_argument(
    "-g",
    "--genome",
    dest="genome",
    help=(
        "Genome; only necessary in combination with a BED file with clusters "
        "as inputfile."
    ),
    metavar="GENOME",
)
p.set_defaults(func=commands.diff)
# BUG FIX: a stray duplicate `p.set_defaults(func=commands.logo)` followed here.
# set_defaults() overrides previous defaults, so `gimme diff` dispatched to the
# logo command instead of commands.diff. The stray line has been removed.

# --- `prediction` subcommand: run a single motif prediction tool ---
p = subparsers.add_parser("prediction")
p.add_argument("tool", help="Specific motif prediction tool to run", metavar="NAME")
p.add_argument("infile", help="Input FASTA file", metavar="FILE")
p.add_argument("outfile", help="Output PFM file", metavar="FILE")
p.add_argument(
    "-p",
    dest="paramfile",
    help="YAML file with paramaters",
    default=None,
    metavar="FILE",
)
p.set_defaults(func=commands.prediction)
class Strictness(argparse.Action):
    """Map whichever of --strict/--medium/--lenient was given onto its dest.

    The flags take no value (registered with nargs=0); the option string
    itself encodes the chosen strictness level.
    """

    def __call__(self, parser, ns, values, option):
        # The option string contains exactly one of these level names.
        for level in ("strict", "medium", "lenient"):
            if level in option:
                setattr(ns, self.dest, level)
# --- `motif2factors` subcommand: build a motif2factors file via orthology ---
p = subparsers.add_parser(
    "motif2factors",
    help="Generate a motif2factors file based on orthology for your species of interest.",
)
p.add_argument(
    "--new-reference",
    help="The assembly the new motif2factors file will be based on.",
    metavar="ASSEMBLY",
    required=True,
    nargs="+",
)
p.add_argument(
    "--database",
    help="The database you want to change convert to your species of interest. (default is gimme.vertebrate.v5.0)",
    metavar="db",
    default="gimme.vertebrate.v5.0",
)
p.add_argument(
    "--database-references",
    help="The assembly(s) on which the orginal motif2factors is based on. (default is human and mouse)",
    metavar="ASSEMBLY",
    nargs="+",
)
p.add_argument(
    "--ortholog-references",
    help="Extra assemblies for better orthology inference between the new reference and database reference. (default is a range of vertebrate species)",
    metavar="ASSEMBLY",
    nargs="+",
)
p.add_argument(
    "--genomes_dir",
    help="Where to find/store genomepy genomes. Defaults to the genomepy config settings.",
    metavar="DIR",
)
p.add_argument(
    "--tmpdir",
    help="Where to place intermediate files. Defaults to system temp.",
    metavar="DIR",
)
p.add_argument(
    "--outdir",
    help="Where to save the results to. Defaults to current working directory.",
    metavar="OUTDIR",
    default=".",
)
# The three strictness flags share a single dest ("strategy"); the custom
# Strictness action records which one was given. They take no value (nargs=0).
p.add_argument(
    "--strict",
    "--medium",
    "--lenient",
    default="lenient",
    help="How strict should the names of the genes in the assembly be followed. Strict: base names only on what is in the annotation file; Medium: base on annotation file, as well as on mygene.info name and symbol query; Lenient: based on annotation file, and mygeneinfo name, symbol, alias, other_names, accession, accession.protein, refseq, refseq.protein, ensembl, ensembl.gene. Lenient is the default, but in case of false-positive hits you can tune this stricter.",
    dest="strategy",
    action=Strictness,
    nargs=0,
)
p.add_argument(
    "--threads",
    help="Maximum number of parallel threads used.",
    metavar="INT",
    # BUG FIX: type=int was missing, so a value given on the command line
    # stayed a string while the default was an int.
    type=int,
    default=24,
)
p.add_argument(
    "--keep-intermediate",
    dest="keep_intermediate",
    help="Keep temporary files, do not delete tmpdir.",
    default=False,
    action="store_true",
)
p.set_defaults(func=commands.motif2factors)
# Dispatch: print global help when no arguments are given, reject the removed
# `roc` command, otherwise parse the arguments and run the chosen subcommand.
if len(sys_args) == 0:
    parser.print_help()
elif sys_args[0] == "roc":
    print(
        "This command is deprecated. "
        "Use the following command for the same functionality:"
    )
    print()
    print("$ gimme motifs <inputfile> <outdir> --known")
    sys.exit(1)
else:
    ignored = ["-v", "--version", "-h", "--help"]
    if len(sys_args) == 1 and sys_args[0] not in ignored:
        # A bare subcommand name: hint at its detailed help (ANSI yellow text).
        print(
            "\033[93mtype `gimme {} -h` for more details\033[0m\n".format(
                sys_args[-1]
            )
        )
    args = parser.parse_args(sys_args)
    if hasattr(args, "genome"):
        if args.genome is not None:
            # Validate the genome before running; a FASTA file path is also accepted.
            if not check_genome(args.genome):
                print(
                    "Genome not found. Have you installed your genome with genomepy?"
                )
                print("See https://github.com/simonvh/genomepy for details.")
                print("Alternatively, you can specify a FASTA file.")
                exit(1)
    # Each subparser registered its handler via set_defaults(func=...).
    args.func(args)
| StarcoderdataPython |
8065409 | import create_bwc_index
import logging
import os
import random
import shutil
import subprocess
import sys
import tempfile
def fetch_version(version):
    """Download the given Elasticsearch release via the get-bwc-version.py helper."""
    logging.info('fetching ES version %s' % version)
    # The helper script lives next to this script on disk.
    script = os.path.join(os.path.split(sys.argv[0])[0], 'get-bwc-version.py')
    cmd = [sys.executable, script, version]
    if subprocess.call(cmd) != 0:
        raise RuntimeError('failed to download ES version %s' % version)
def main():
    '''
    Creates a static back compat index (.zip) with mixed 0.20 (Lucene 3.x) and 0.90 (Lucene 4.x) segments.
    '''
    logging.basicConfig(format='[%(levelname)s] [%(asctime)s] %(message)s', level=logging.INFO,
                        datefmt='%Y-%m-%d %I:%M:%S %p')
    # Quieten the chatty client libraries.
    logging.getLogger('elasticsearch').setLevel(logging.ERROR)
    logging.getLogger('urllib3').setLevel(logging.WARN)
    tmp_dir = tempfile.mkdtemp()
    try:
        data_dir = os.path.join(tmp_dir, 'data')
        logging.info('Temp data dir: %s' % data_dir)
        first_version = '0.20.6'
        second_version = '0.90.6'
        index_name = 'index-%s-and-%s' % (first_version, second_version)
        # Download old ES releases if necessary:
        release_dir = os.path.join('backwards', 'elasticsearch-%s' % first_version)
        if not os.path.exists(release_dir):
            fetch_version(first_version)
        node = create_bwc_index.start_node(first_version, release_dir, data_dir, cluster_name=index_name)
        client = create_bwc_index.create_client()
        # Creates the index & indexes docs w/ first_version:
        create_bwc_index.generate_index(client, first_version, index_name)
        # Make sure we write segments:
        flush_result = client.indices.flush(index=index_name)
        if not flush_result['ok']:
            raise RuntimeError('flush failed: %s' % str(flush_result))
        segs = client.indices.segments(index=index_name)
        shards = segs['indices'][index_name]['shards']
        if len(shards) != 1:
            raise RuntimeError('index should have 1 shard but got %s' % len(shards))
        # Remember which segment names the old version produced.
        first_version_segs = shards['0'][0]['segments'].keys()
        create_bwc_index.shutdown_node(node)
        print('%s server output:\n%s' % (first_version, node.stdout.read().decode('utf-8')))
        # Mark the node as stopped so the finally-block does not stop it twice.
        node = None
        release_dir = os.path.join('backwards', 'elasticsearch-%s' % second_version)
        if not os.path.exists(release_dir):
            fetch_version(second_version)
        # Now also index docs with second_version:
        node = create_bwc_index.start_node(second_version, release_dir, data_dir, cluster_name=index_name)
        client = create_bwc_index.create_client()
        # If we index too many docs, the random refresh/flush causes the ancient segments to be merged away:
        num_docs = 10
        create_bwc_index.index_documents(client, index_name, 'doc', num_docs)
        # Make sure we get a segment:
        flush_result = client.indices.flush(index=index_name)
        if not flush_result['ok']:
            raise RuntimeError('flush failed: %s' % str(flush_result))
        # Make sure we see mixed segments (it's possible Lucene could have "accidentally" merged away the first_version segments):
        segs = client.indices.segments(index=index_name)
        shards = segs['indices'][index_name]['shards']
        if len(shards) != 1:
            raise RuntimeError('index should have 1 shard but got %s' % len(shards))
        second_version_segs = shards['0'][0]['segments'].keys()
        #print("first: %s" % first_version_segs)
        #print("second: %s" % second_version_segs)
        # for/else: the else-branch fires only when the loop finished without break.
        for segment_name in first_version_segs:
            if segment_name in second_version_segs:
                # Good: an ancient version seg "survived":
                break
        else:
            raise RuntimeError('index has no first_version segs left')
        for segment_name in second_version_segs:
            if segment_name not in first_version_segs:
                # Good: a second_version segment was written
                break
        else:
            raise RuntimeError('index has no second_version segs left')
        create_bwc_index.shutdown_node(node)
        print('%s server output:\n%s' % (second_version, node.stdout.read().decode('utf-8')))
        node = None
        create_bwc_index.compress_index('%s-and-%s' % (first_version, second_version), tmp_dir, 'src/test/resources/org/elasticsearch/rest/action/admin/indices/upgrade')
    finally:
        # Always stop any still-running node and remove the scratch directory.
        if node is not None:
            create_bwc_index.shutdown_node(node)
        shutil.rmtree(tmp_dir)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
9615104 | #!/usr/bin/env python3
def rk4(m, h_eff, i_s, dt):
    """One classic fourth-order Runge-Kutta step for the ODE defined by dm().

    Evaluates dm(state, h_eff, i_s) at the four RK4 stage points and returns
    the weighted increment (k1 + 2*k2 + 2*k3 + k4) * dt / 6.

    NOTE: the *increment* is returned, not m + increment — the caller is
    expected to add the result to `m` itself.
    """
    k = np.zeros((4, 3))
    mm = np.zeros((3, 3))  # intermediate states for stages 2-4
    # Stage 1: slope at the start of the interval.
    k[0, :] = dm(m, h_eff, i_s)
    mm[0, :] = m + k[0, :] * dt / 2
    # Stage 2: slope at the midpoint using the stage-1 estimate.
    k[1, :] = dm(mm[0, :], h_eff, i_s)
    mm[1, :] = m + k[1, :] * dt / 2
    # Stage 3: slope at the midpoint using the stage-2 estimate.
    k[2, :] = dm(mm[1, :], h_eff, i_s)
    mm[2, :] = m + k[2, :] * dt
    # Stage 4: slope at the end of the interval.
    k[3, :] = dm(mm[2, :], h_eff, i_s)
    # Weighted average of the four slopes. (The dead `m_new = np.zeros((1, 3))`
    # preallocation was removed; it was immediately overwritten.)
    return (k[0, :] + 2 * (k[1, :] + k[2, :]) + k[3, :]) * dt / 6
| StarcoderdataPython |
11399198 |
import uuid as _uuid
from copy import copy as _copy
import os as _os
from Acquire.Service import Service as _Service
from ._errors import StorageServiceError
__all__ = ["StorageService"]
class StorageService(_Service):
    """This is a specialisation of Service for Storage Services"""

    def __init__(self, other=None):
        """Construct either a blank storage service or a downcast of `other`.

        Passing an existing Service copies its state wholesale and requires
        that it really is a storage service.
        """
        if isinstance(other, _Service):
            # Downcast: share the source service's entire state.
            self.__dict__ = _copy(other.__dict__)
            if not self.is_storage_service():
                # NOTE(review): imported locally from Acquire.Storage although the
                # module also imports StorageServiceError from ._errors above —
                # presumably the same class re-exported; confirm.
                from Acquire.Storage import StorageServiceError
                raise StorageServiceError(
                    "Cannot construct an StorageService from "
                    "a service which is not an storage service!")
        else:
            _Service.__init__(self)
            # Assigned later by the service setup code.
            self._storage_compartment_id = None

    def _call_local_function(self, function, args):
        """Internal function called to short-cut local 'remote'
        function calls
        """
        # Route the call through the normal handler machinery, but in-process.
        from storage.route import storage_functions as _storage_functions
        from admin.handler import create_handler as _create_handler
        handler = _create_handler(_storage_functions)
        return handler(function=function, args=args)
| StarcoderdataPython |
9732764 | # -*- coding: utf-8 -*-
"""Module containing useful 1D plotting abstractions on top of matplotlib."""
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.collections import LineCollection
from matplotlib.colors import Normalize
from sliceplots.util import _idx_from_val, _make_ax
def plot_multicolored_line(
    *,
    ax=None,
    x,
    y,
    other_y,
    cmap="viridis",
    vmin=None,
    vmax=None,
    linewidth=2,
    alpha=1
):
    r"""Plots a line colored based on the values of another array.

    Plots the curve ``y(x)``, colored based on the values in ``other_y``.

    Parameters
    ----------
    ax : :py:class:`~matplotlib.axes.Axes`, optional
        Axes instance, for plotting, defaults to ``None``.
        If ``None``, a new :py:class:`~matplotlib.figure.Figure` will be created.
    y : 1d array_like
        The dependent variable.
    x : 1d array_like
        The independent variable.
    other_y: 1d array_like
        The values whose magnitude will be converted to colors.
    cmap : str, optional
        The used colormap (defaults to "viridis").
    vmin : float, optional
        Lower normalization limit
    vmax : float, optional
        Upper normalization limit
    linewidth : float, optional (default 2)
        Width of the plotted line
    alpha : float, optional (default 1)
        Line transparency, between 0 and 1

    Returns
    -------
    ax, line : Axes, LineCollection
        Main Axes and plotted line

    Raises
    ------
    AssertionError
        If the length of `y` and `other_y` do not match.

    References
    ----------
    ``matplotlib`` `example <https://matplotlib.org/gallery/lines_bars_and_markers/multicolored_line.html>`_.

    Examples
    --------
    We plot a curve and color it based on the value of its first derivative.

    .. plot::
       :include-source:

        import numpy as np
        from matplotlib import pyplot
        from sliceplots import plot_multicolored_line

        x = np.linspace(0, 3 * np.pi, 500)
        y = np.sin(x)
        dydx = np.gradient(y) * 100  # first derivative

        _, ax = pyplot.subplots()

        plot_multicolored_line(ax=ax, x=x, y=y, other_y=dydx, label="dydx")

        ax.set(ylabel="y", xlabel="x")
    """
    if not (len(y) == len(other_y)):
        raise AssertionError("The two 'y' arrays must have the same size!")

    ax = ax or _make_ax()

    # Create a set of line segments so that we can color them individually
    # This creates the points as a N x 1 x 2 array so that we can stack points
    # together easily to get the segments. The segments array for line collection
    # needs to be (numlines) x (points per line) x 2 (for x and y)
    points = np.array([x, y]).T.reshape(-1, 1, 2)
    segments = np.concatenate([points[:-1], points[1:]], axis=1)

    # Default the color limits to the data range of the coloring array.
    if vmin is None:
        vmin = np.min(other_y)
    if vmax is None:
        vmax = np.max(other_y)

    # Create a continuous norm to map from data points to colors
    norm = Normalize(vmin, vmax)
    lc = LineCollection(segments, cmap=cmap, norm=norm)

    # Set the values used for colormapping
    lc.set_array(other_y)
    lc.set_linewidth(linewidth)
    lc.set_alpha(alpha)
    line = ax.add_collection(lc)
    return ax, line
def plot1d_break_x(*, ax=None, h_axis, v_axis, param, slice_opts):
    r"""Line plot with a broken x-axis.

    Parameters
    ----------
    ax : :py:class:`~matplotlib.axes.Axes`, optional
        Axes instance, for plotting.
        If ``None``, a new :py:class:`~matplotlib.figure.Figure` will be created.
        Defaults to ``None``.
    h_axis : 1d array_like
        x-axis data.
    v_axis : 1d array_like
        y-axis data.
    param : dict
        Axes limits and labels.
    slice_opts : dict
        Options for plotted line.

    Returns
    -------
    ax_left, ax_right : tuple of Axes
        Left and right Axes of the split plot.

    Examples
    --------
    .. plot::
       :include-source:

        import numpy as np
        from matplotlib import pyplot
        from sliceplots import plot1d_break_x

        uu = np.linspace(0, np.pi, 128)
        data = np.cos(uu - 0.5) * np.cos(uu.reshape(-1, 1) - 1.0)

        _, ax = pyplot.subplots()

        plot1d_break_x(
            ax=ax,
            h_axis=uu,
            v_axis=data[data.shape[0] // 2, :],
            param={
                "xlim_left": (0, 1),
                "xlim_right": (2, 3),
                "xlabel": r"$x$ ($\mu$m)",
                "ylabel": r"$\rho$ (cm${}^{-3}$)",
            },
            slice_opts={"ls": "--", "color": "#d62728"})
    """
    ax_left = ax or _make_ax()
    # Carve a second axes of equal width to the right of the original one.
    divider = make_axes_locatable(ax_left)
    ax_right = divider.new_horizontal(size="100%", pad=1)
    ax_left.figure.add_axes(ax_right)

    # Both axes plot the full curve; each shows only its own x-window.
    ax_left.plot(h_axis, v_axis, **slice_opts)
    ax_left.set_ylabel(param["ylabel"])
    ax_left.set_xlabel(param["xlabel"])
    ax_left.set_xlim(*param["xlim_left"])
    # Hide the facing spines so the break looks open between the panels.
    ax_left.spines["right"].set_visible(False)
    ax_left.yaxis.set_ticks_position("left")

    ax_right.plot(h_axis, v_axis, **slice_opts)
    ax_right.set_ylabel(param["ylabel"])
    ax_right.set_xlabel(param["xlabel"])
    ax_right.yaxis.set_label_position("right")
    ax_right.set_xlim(*param["xlim_right"])
    ax_right.spines["left"].set_visible(False)
    ax_right.yaxis.set_ticks_position("right")

    # From https://matplotlib.org/examples/pylab_examples/broken_axis.html
    d = 0.015  # how big to make the diagonal lines in axes coordinates
    # arguments to pass plot, just so we don't keep repeating them
    kwargs = dict(transform=ax_left.transAxes, color="k", clip_on=False)
    # Diagonal break marks on the right edge of the left panel...
    ax_left.plot((1 - d, 1 + d), (-d, +d), **kwargs)
    ax_left.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)

    kwargs.update(transform=ax_right.transAxes)  # switch to the right axes
    # ...and on the left edge of the right panel.
    ax_right.plot((-d, +d), (1 - d, 1 + d), **kwargs)
    ax_right.plot((-d, +d), (-d, +d), **kwargs)

    return ax_left, ax_right
def plot1d(*, ax=None, h_axis, v_axis, xlabel=r"", ylabel=r"", **kwargs):
    r"""Plot the data with given labels and plot options.

    Parameters
    ----------
    ax : class:`~matplotlib.axes.Axes`, optional
        Axes instance, for plotting.
        If ``None``, a new :class:`~matplotlib.figure.Figure` will be created.
        Defaults to ``None``.
    h_axis : :py:class:`np.ndarray`
        x-axis data.
    v_axis : :py:class:`np.ndarray`
        y-axis data.
    xlabel : str, optional
        x-axis label.
    ylabel : str, optional
        y-axis label.
    kwargs : dict, optional
        Other arguments for :meth:`~matplotlib.axes.Axes.plot`.

    Returns
    -------
    ax : Axes
        Modified Axes, containing plot.

    Examples
    --------
    >>> import numpy as np

    >>> uu = np.linspace(0, np.pi, 128)
    >>> data = np.cos(uu - 0.5) * np.cos(uu.reshape(-1, 1) - 1.0)

    >>> plot1d(
    ...     h_axis=uu,
    ...     v_axis=data[data.shape[0] // 2, :],
    ...     xlabel=r"$z$ ($\mu$m)",
    ...     ylabel=r"$a_0$",
    ...     xlim=[0, 3],
    ...     ylim=[-1, 1],
    ...     color="#d62728",
    ... )  #doctest: +ELLIPSIS
    <matplotlib.axes._subplots.AxesSubplot object at 0x...>
    """
    # Pop our own options out of kwargs so the rest can be forwarded to plot().
    # NOTE: the default ylim is computed from the *full* v_axis, before the
    # data is trimmed to the xlim window below.
    xlim = kwargs.pop("xlim", [np.min(h_axis), np.max(h_axis)])
    ylim = kwargs.pop("ylim", [np.min(v_axis), np.max(v_axis)])
    #
    # Convert the x-limits to indices and trim both arrays to that window.
    xmin_idx, xmax_idx = (
        _idx_from_val(h_axis, xlim[0]),
        _idx_from_val(h_axis, xlim[1]),
    )
    #
    h_axis = h_axis[xmin_idx:xmax_idx]
    data = v_axis[xmin_idx:xmax_idx]
    #
    label = {"x": xlabel, "y": ylabel}
    text = kwargs.pop("text", "")
    #
    ax = ax or _make_ax()
    ax.plot(h_axis, data, **kwargs)
    ax.set(
        xlim=[h_axis[0], h_axis[-1]], ylim=ylim, ylabel=label["y"], xlabel=label["x"]
    )
    # Optional annotation in the top-left corner (axes coordinates).
    ax.text(0.02, 0.95, text, transform=ax.transAxes, color="firebrick")
    return ax
| StarcoderdataPython |
4929166 | <gh_stars>0
"""
radiomanager.models.types
=========================
Custom SQLAlchemy data types
"""
import uuid
from sqlalchemy.types import BINARY, TypeDecorator
class UUID(TypeDecorator):
    """Store :class:`uuid.UUID` values in a 16-byte BINARY column.

    Accepts ``uuid.UUID`` instances, hex strings or raw 16-byte values on
    the way in, and always yields ``uuid.UUID`` objects on the way out.
    Based on the backend-agnostic GUID recipe from the SQLAlchemy docs:
    https://docs.sqlalchemy.org/en/rel_0_9/core/custom_types.html?highlight=guid#backend-agnostic-guid-type
    """

    impl = BINARY

    def load_dialect_impl(self, dialect):
        # Always persist as a fixed-width 16-byte binary column.
        return dialect.type_descriptor(BINARY(16))

    def process_bind_param(self, value, dialect):
        # Normalise any accepted representation to the raw 16-byte form.
        if value is None:
            return None
        if isinstance(value, uuid.UUID):
            return value.bytes
        if isinstance(value, str):
            return uuid.UUID(hex=value).bytes
        if isinstance(value, bytes):
            return value
        raise TypeError("Unable to convert %s to uuid.UUID" %
                        (type(value),))

    def process_result_value(self, value, dialect):
        # NULL columns stay None; anything else is the raw 16 bytes.
        if value is None:
            return None
        return uuid.UUID(bytes=value)
| StarcoderdataPython |
237841 | import sys
import json
import time
import beanstalkc
import tornado.ioloop
import tornado.web
from threading import Thread
from job import jobs, JobInfo
from train import Trainer
from predict import Predictor
from config import BEANSTALK_HOST, BEANSTALK_PORT, BEANSTALK_YAML
from constant import TRAIN, PREDICT
# Shared tornado IO loop; the worker thread schedules callbacks onto it.
io_loop = tornado.ioloop.IOLoop.instance()
def create_beanstalk():
    """Open and return a new connection to the configured beanstalkd server."""
    return beanstalkc.Connection(
        host=BEANSTALK_HOST, port=BEANSTALK_PORT, parse_yaml=BEANSTALK_YAML)
class WorkflowHandler(tornado.web.RequestHandler):
    """Serves the workflow HTML page."""

    def get(self):
        self.render("workflow.html")
class JobCheckHandler(tornado.web.RequestHandler):
    """Long-polls for the result of a previously submitted job.

    If the job has already finished, its cached result is written
    immediately; otherwise the connection is kept open (asynchronous) and
    completed later by job_finished().
    """

    @tornado.web.asynchronous
    def get(self, jobId):
        jobId = int(jobId)
        jobInfo = jobs.get(jobId, None)
        if jobInfo is None:
            # First time anyone asks about this job: register a placeholder.
            jobInfo = JobInfo(jobId)
            jobs[jobId] = jobInfo
        if jobInfo.data is not None:
            # Result already available: answer right away.
            self.write(jobInfo.data)
            self.finish()
            return
        # Not done yet: park this request until job_finished() flushes it.
        jobInfo.handlers.append(self)
class TrainHandler(tornado.web.RequestHandler):
    """Accepts a JSON training request and enqueues it on beanstalkd."""

    def post(self):
        data = json.loads(self.request.body)
        data["jobType"] = TRAIN
        beanstalk = create_beanstalk()
        ts = int(time.time())
        # beanstalkc's put() returns the queue-assigned job id.
        jobId = beanstalk.put(json.dumps(data))
        self.write(json.dumps({"jobId": jobId, "ts": ts}))
        beanstalk.close()
        return
class PredictHandler(tornado.web.RequestHandler):
    """Accepts a JSON prediction request and enqueues it on beanstalkd."""

    def post(self):
        data = json.loads(self.request.body)
        data["jobType"] = PREDICT
        beanstalk = create_beanstalk()
        ts = int(time.time())
        # beanstalkc's put() returns the queue-assigned job id.
        jobId = beanstalk.put(json.dumps(data))
        self.write(json.dumps({"jobId": jobId, "ts": ts}))
        beanstalk.close()
        return
def main():
    """Configure URL routes and run the tornado HTTP server on port 8888."""
    application = tornado.web.Application([
        (r"/train/", TrainHandler),
        (r"/predict/", PredictHandler),
        (r"/check/(\d*)/", JobCheckHandler),
        (r"/workflow/", WorkflowHandler),
        (r'/static/(.*)', tornado.web.StaticFileHandler, {'path': "./static/"})
    ], template_path="./template")
    application.listen(8888)
    # Blocks forever, servicing requests and worker callbacks.
    io_loop.start()
def job_main():
beanstalk = create_beanstalk()
print "Job queue starts..."
try:
while True:
try:
job = beanstalk.reserve()
except beanstalkc.DeadlineSoon:
continue
request = json.loads(job.body)
jobId = job.jid
print 'Working on job %s...' % jobId
try:
jobType = request["jobType"]
if jobType == TRAIN:
category = request["category"]
model = request["model"]
trainer = Trainer.create(category, model)
if trainer:
data = {}
data["table_name"] = request["inputTableName"]
data["feature_names"] = request.get("features", None)
data["target_name"] = request.get("target", None)
ret = trainer.run(**data)
print 'Job %s finished.' % jobId
else:
ret = []
print 'No trainer for job %s.' % jobId
elif jobType == PREDICT:
modelId = request["modelId"]
predictor = Predictor(modelId)
data = {}
data["table_name"] = request["inputTableName"]
ret = predictor.run(**data)
print 'Job %s finished.' % jobId
except:
ret = []
print 'Error on job %s.' % jobId
job.delete()
#time.sleep(30)
io_loop.add_callback(job_finished, jobId, ret)
except (KeyboardInterrupt, SystemExit):
beanstalk.close()
sys.exit()
def job_finished(jobId, data):
    """Record a finished job's result and flush any long-polling clients.

    Runs on the IO-loop thread (scheduled by the worker via add_callback).
    """
    jobInfo = jobs.get(jobId, None)
    if jobInfo is None:
        jobInfo = JobInfo(jobId)
        jobs[jobId] = jobInfo
    jobInfo.data = data
    # Answer every request that was parked waiting for this job.
    for h in jobInfo.handlers:
        h.write(jobInfo.data)
        h.finish()
    del jobInfo.handlers[:]
if __name__ == '__main__':
    # Run the beanstalk consumer in a daemon thread so it dies with the server.
    job_thread = Thread(target=job_main)
    job_thread.daemon = True
    job_thread.start()
    main()
| StarcoderdataPython |
import argparse
import os
import pathlib
import sys

import h5py
import numpy as np
def parse_args():
    """Define and parse the command-line options of this conversion script."""
    p = argparse.ArgumentParser()
    p.add_argument('--dataset_dir', default="s3dis_raw", type=str, help='Path to .npy format dataset')
    p.add_argument('--output_dir', default="", type=str, help='Path to output directory')
    p.add_argument('--num_points', default=4096, type=int, help='Number of input points')
    p.add_argument('--grid_size', default=1, type=float, help='Size of each grid')
    p.add_argument('--stride', default=0.5, type=float, help='Stride')
    return p.parse_args()
def save_h5ins(h5_filename, data, label, gid, data_dtype='uint8', label_dtype='uint8'):
    """Write point data, per-point labels and per-point ids to an HDF5 file.

    Datasets written: 'data' (points), 'seglabel' (labels), 'pid' (ids),
    all gzip-compressed.
    """
    # Context manager guarantees the file is closed even if a write fails
    # (the original left the handle open on any exception).
    with h5py.File(h5_filename, "w") as h5_fout:
        h5_fout.create_dataset(
            'data', data=data,
            compression='gzip', compression_opts=4,
            dtype=data_dtype)
        h5_fout.create_dataset(
            'seglabel', data=label,
            compression='gzip', compression_opts=1,
            dtype=label_dtype)
        h5_fout.create_dataset(
            'pid', data=gid,
            compression='gzip', compression_opts=1,
            dtype=label_dtype)
def sample_data(data, num_sample):
    """Resample the first axis of *data* to exactly num_sample rows.

    Returns (resampled_data, indices). If data already has num_sample rows it
    is returned untouched; with more rows a random subset is drawn (with
    replacement); with fewer rows randomly chosen duplicates are appended.
    """
    n = data.shape[0]
    if n == num_sample:
        return data, range(n)
    if n > num_sample:
        keep = np.random.choice(n, num_sample)
        return data[keep, ...], keep
    # Too few rows: pad with random duplicates after the originals.
    extra = np.random.choice(n, num_sample - n)
    return np.concatenate([data, data[extra, ...]], 0), list(range(n)) + list(extra)
def sample_data_label(data, label, inslabel, num_sample):
    """Resample points together with both of their label arrays."""
    sampled, picked = sample_data(data, num_sample)
    # Apply the same row selection to the two label arrays.
    return sampled, label[picked], inslabel[picked]
def room2blocks(data, label, inslabel, num_point, block_size=1.0, stride=1.0,
                random_sample=False, sample_num=None, sample_aug=1):
    """ Prepare block training data.
    Args:
        data: N x 6 numpy array, 012 are XYZ in meters, 345 are RGB in [0,1]
            assumes the data is shifted (min point is origin) and aligned
            (aligned with XYZ axis)
        label: N size uint8 numpy array from 0-12
        num_point: int, how many points to sample in each block
        block_size: float, physical size of the block in meters
        stride: float, stride for block sweeping
        random_sample: bool, if True, we will randomly sample blocks in the room
        sample_num: int, if random sample, how many blocks to sample
            [default: room area]
        sample_aug: if random sample, how much aug
    Returns:
        block_datas: K x num_point x 6 np array of XYZRGB, RGB is in [0,1]
        block_labels: K x num_point x 1 np array of uint8 labels
    TODO: for this version, blocking is in fixed, non-overlapping pattern.
    """
    assert(stride<=block_size)
    limit = np.amax(data, 0)[0:3]

    # Get the corner location for our sampling blocks
    xbeg_list = []
    ybeg_list = []
    if not random_sample:
        # Regular sweep over the room, walking columns in alternating
        # (serpentine) y-order.
        num_block_x = np.maximum(int(np.ceil((limit[0] - block_size) / stride)) + 1, 1)
        num_block_y = np.maximum(int(np.ceil((limit[1] - block_size) / stride)) + 1, 1)
        for i in range(num_block_x):
            if i % 2 == 0:
                for j in range(num_block_y):
                    xbeg_list.append(i*stride)
                    ybeg_list.append(j*stride)
            else:
                for j in range(num_block_y)[::-1]:
                    xbeg_list.append(i*stride)
                    ybeg_list.append(j*stride)
    else:
        # Random block corners; corners may start up to one block_size
        # left/below the room so edge points can still be covered.
        num_block_x = int(np.ceil(limit[0] / block_size))
        num_block_y = int(np.ceil(limit[1] / block_size))
        if sample_num is None:
            sample_num = num_block_x * num_block_y * sample_aug
        for _ in range(sample_num):
            xbeg = np.random.uniform(-block_size, limit[0])
            ybeg = np.random.uniform(-block_size, limit[1])
            xbeg_list.append(xbeg)
            ybeg_list.append(ybeg)

    # Collect blocks
    block_data_list = []
    block_label_list = []
    block_inslabel_list = []
    idx = 0
    for idx in range(len(xbeg_list)):
        xbeg = xbeg_list[idx]
        ybeg = ybeg_list[idx]
        # Select all points whose XY falls inside this block's footprint.
        xcond = (data[:,0]<=xbeg+block_size) & (data[:,0]>=xbeg)
        ycond = (data[:,1]<=ybeg+block_size) & (data[:,1]>=ybeg)
        cond = xcond & ycond
        if np.sum(cond) < 100: # discard block if there are less than 100 pts.
            continue

        block_data = data[cond, :]
        block_label = label[cond]
        block_inslabel = inslabel[cond]

        # randomly subsample data
        block_data_sampled, block_label_sampled, block_inslabel_sampled = sample_data_label(block_data, block_label, block_inslabel, num_point)
        block_data_list.append(np.expand_dims(block_data_sampled, 0))
        block_label_list.append(np.expand_dims(block_label_sampled, 0))
        block_inslabel_list.append(np.expand_dims(block_inslabel_sampled, 0))

    # NOTE(review): if no block survives the 100-point filter these lists are
    # empty and np.concatenate raises — presumably rooms always yield at
    # least one block; confirm.
    return np.concatenate(block_data_list, 0), \
        np.concatenate(block_label_list, 0),\
        np.concatenate(block_inslabel_list, 0)
def room2blocks_plus_normalized(data_label, num_point, block_size, stride,
                                random_sample, sample_num, sample_aug):
    """ room2block, with input filename and RGB preprocessing.
        for each block centralize XYZ, add normalized XYZ as 678 channels
    """
    data = data_label[:,0:6]
    # NOTE: in-place division on a view — this also mutates the caller's
    # data_label array (RGB columns scaled from 0-255 to [0,1]).
    data[:,3:6] /= 255.0
    label = data_label[:,-2].astype(np.uint8)
    inslabel = data_label[:,-1].astype(np.uint8)
    max_room_x = max(data[:,0])
    max_room_y = max(data[:,1])
    max_room_z = max(data[:,2])

    data_batch, label_batch, inslabel_batch = room2blocks(data, label, inslabel, num_point, block_size, stride,
                                                         random_sample, sample_num, sample_aug)
    # Output layout per point: 0-5 = block-local XYZ + RGB,
    # 6-8 = XYZ normalized by the room extent (global position).
    new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))
    for b in range(data_batch.shape[0]):
        new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_room_x
        new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_room_y
        new_data_batch[b, :, 8] = data_batch[b, :, 2]/max_room_z
        # Re-centre XY on the block's centre (z is left untouched).
        minx = min(data_batch[b, :, 0])
        miny = min(data_batch[b, :, 1])
        data_batch[b, :, 0] -= (minx+block_size/2)
        data_batch[b, :, 1] -= (miny+block_size/2)
    new_data_batch[:, :, 0:6] = data_batch
    return new_data_batch, label_batch, inslabel_batch
def room2blocks_wrapper_normalized(data_label_filename, num_point, block_size=1.0, stride=1.0,
                                   random_sample=False, sample_num=None, sample_aug=1):
    """Load a point-cloud file (.txt or .npy) and convert it to normalized blocks."""
    if data_label_filename.endswith('txt'):
        data_label = np.loadtxt(data_label_filename)
    elif data_label_filename.endswith('npy'):
        data_label = np.load(data_label_filename)
    else:
        # Unsupported extension: report and abort, matching the original behaviour.
        print('Unknown file type! exiting.')
        exit()
    return room2blocks_plus_normalized(data_label, num_point, block_size, stride,
                                       random_sample, sample_num, sample_aug)
def main():
    """Convert every .npy room file under args.dataset_dir into an HDF5 file of
    sampled blocks, written into a directory named after the grid parameters.

    Reads the module-level ``args`` namespace; skips rooms whose output
    already exists and reports (but does not abort on) per-file failures.
    """
    dataset_root = pathlib.Path(args.dataset_dir).resolve()
    files = dataset_root.glob("*.npy")
    output_dir = pathlib.Path(args.output_dir).resolve().joinpath(
        'hdf5_{}m_grid_{}m_stride_{}_points'.format(args.grid_size, args.stride, args.num_points))
    output_dir.mkdir(exist_ok=True)
    count = 0
    for i, file in enumerate(files):
        try:
            assert file.exists()
            filename = str(file)
            # BUGFIX: str.strip('.npy') removed any of the characters '.', 'n',
            # 'p', 'y' from BOTH ends of the name (e.g. 'pony.npy' -> 'o');
            # Path.stem yields the basename without its extension.
            h5_filename = output_dir.joinpath('{}.h5'.format(file.stem))
            if h5_filename.exists():
                continue
            data, label, inslabel = room2blocks_wrapper_normalized(
                filename, args.num_points, block_size=args.grid_size, stride=args.stride,
                random_sample=False, sample_num=None)
            save_h5ins(h5_filename, data, label, inslabel, "float32", "int32")
            print(h5_filename)
        except Exception as e:
            print("error", file, str(output_dir))
            print(e)
        # BUGFIX: the original printed i+1 after the loop, raising NameError
        # when the glob matched no files.
        count = i + 1
    print("Total samples: {}".format(count))
    print("=============================================")
if __name__ == "__main__":
    # Parse CLI options once at module entry; `args` is read as a global by main().
    args = parse_args()
    print(args)
    main()
| StarcoderdataPython |
185788 | <reponame>abirabedinkhan/Ducky-Script-Compiler
import time
import pyautogui
import keyboard
import sys
import os
import requests
# Read the Ducky Script to run: path from argv[1], defaulting to 'payload.dd'.
try:
    duckyScriptPath = sys.argv[1]
except IndexError:  # narrowed from a bare except: only "no argument given" is expected
    duckyScriptPath = 'payload.dd'
f = open(duckyScriptPath, "r", encoding='utf-8')
duckyScript = f.readlines()
duckyScript = [x.strip() for x in duckyScript]

# Optional "DEFAULTDELAY <ms>" / "DEFAULT_DELAY <ms>" directive on the first
# line; stored in seconds and applied after every executed line.
defaultDelay = 0
if duckyScript[0][:7] == "DEFAULT":
    # BUGFIX: the millisecond value comes AFTER the 13-character keyword; the
    # original sliced [:13], which is the keyword text itself and made int()
    # raise ValueError.
    defaultDelay = int(duckyScript[0][13:]) / 1000

# Ducky keywords and the pyautogui/keyboard key names they map to,
# index-aligned.  Single letters carry a leading space so e.g. "CTRL c"
# matches " c" rather than letters inside longer keywords.
duckyCommands = [
    "WINDOWS", "GUI", "APP", "MENU", "SHIFT", "ALT", "CONTROL", "CTRL", "DOWNARROW", "DOWN",
    "LEFTARROW", "LEFT", "RIGHTARROW", "RIGHT", "UPARROW", "UP", "BREAK", "PAUSE", "CAPSLOCK", "DELETE", "END",
    "ESC", "ESCAPE", "HOME", "INSERT", "NUMLOCK", "PAGEUP", "PAGEDOWN", "PRINTSCREEN", "SCROLLLOCK", "SPACE",
    "TAB", "ENTER", " a", " b", " c", " d", " e", " f", " g", " h", " i", " j", " k", " l", " m", " n", " o", " p", " q", " r", " s", " t",
    " u", " v", " w", " x", " y", " z", " A", " B", " C", " D", " E", " F", " G", " H", " I", " J", " K", " L", " M", " N", " O", " P",
    " Q", " R", " S", " T", " U", " V", " W", " X", " Y", " Z"
]
keyboardCommands = [
    "win", "win", "win", "win", "shift", "alt", "ctrl", "ctrl", "down", "down",
    "left", "left", "right", "right", "up", "up", "pause", "pause", "capslock", "delete", "end",
    "esc", "escape", "home", "insert", "numlock", "pageup", "pagedown", "printscreen", "scrolllock", "space",
    "tab", "enter", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
    "u", "v", "w", "x", "y", "z", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p",
    "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"
]
def lex(line, prev):
    """Interpret one line of Ducky Script and perform its action.

    *prev* is the previously executed line, used by REPEAT.  Side effects:
    sleeps, keystrokes via pyautogui/keyboard, shell commands, and recursive
    execution of included scripts.
    """
    # Variables:
    data = []
    # Check if the statement is a delay
    if line[0:5] == "DELAY" :
        time.sleep(float(line[6:]) / 1000)
    elif line[0:4] == "PATH":
        # Include another script, either from a URL or a local file.
        if line[5:].startswith('https://'):
            try:
                webScipts = requests.get(line[5:]).text
            except:
                webScipts = 'REM'
                print('Please check your internet connection!')
            # NOTE(review): this rebinds a *local* duckyScript; the
            # module-level script list is NOT replaced -- confirm intended.
            duckyScript = webScipts.split('\n')
        else:
            duckyScriptPath = line[5:]
            f = open(duckyScriptPath,"r",encoding='utf-8')
            duckyScript = f.readlines()
            duckyScript = [x.strip() for x in duckyScript]
        # Recursively execute the included script line by line, tracking the
        # previous line so nested REPEAT directives work.
        prev = ''
        for line in duckyScript:
            lex(line, prev)
            previous = line
            prev = previous
    elif line[0:2] == 'OS':
        # Run a host shell command; 'cd' is handled in-process so the working
        # directory change persists across lines.
        checkline = line[3:]
        if checkline[0:2] == 'cd':
            try:
                os.chdir(checkline[3:])
            except:
                print('Please check your path again')
        else:
            os.system(line[3:])
    elif line[0:6] == "STRING" :
        pyautogui.typewrite(line[7:], interval=0.02)
    elif line[0:6] == "REPEAT" :
        # Re-execute the previous line N-1 more times (it already ran once).
        for i in range(int(line[7:]) - 1):
            lex(prev, prev)
    elif line[0:3] == "REM":
        # NOTE(review): str.replace returns a new string and the result is
        # discarded, so this branch is effectively a no-op (comment line).
        line.replace("REM", "#")
    elif line == '' or line == None:
        line = 'REM'
    else:
        # Key-combo line: collect every known key name that occurs in the
        # line and press them together (e.g. "CTRL ALT DELETE").
        for j in range(len(keyboardCommands)):
            if line.find(duckyCommands[j]) != -1:
                data.append(keyboardCommands[j])
        keyboard.press_and_release('+'.join(data))
        data = []
    # Write Default Delay if it exists:
    if defaultDelay != 0:
        time.sleep(defaultDelay)
# Main driver: execute the loaded script top to bottom, tracking the previous
# line so the REPEAT directive can re-run it.
prev = ''
for line in duckyScript:
    lex(line, prev)
    previous = line
    prev = previous
| StarcoderdataPython |
6467293 | <reponame>canavandl/colour
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.rgb` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import sys
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
from colour.models import (
RGB_COLOURSPACES,
XYZ_to_RGB,
RGB_to_XYZ,
RGB_to_RGB)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['sRGB_LINEAR_COLORCHECKER_2005',
'ACES_COLORCHECKER_2005',
'sRGB_TRANSFER_FUNCTION',
'sRGB_INVERSE_TRANSFER_FUNCTION',
'TestXYZ_to_RGB',
'TestRGB_to_XYZ',
'TestRGB_to_RGB']
# Reference data for the 24 ColorChecker 2005 patches.  Each entry is
# [CIE xyY, CIE XYZ tristimulus values, expected RGB triplet] -- presumably
# the RGB values were produced with the reference colour implementation;
# confirm against the test methods below.
sRGB_LINEAR_COLORCHECKER_2005 = [
    [[0.4316, 0.3777, 0.1008],
     (0.11518474980142972, 0.1008, 0.050893725178713274),
     np.array([0.45293517, 0.31732158, 0.26414773])],
    [[0.4197, 0.3744, 0.34950000000000003],
     (0.39178725961538463, 0.34950000000000003, 0.19220633012820515),
     np.array([0.77875824, 0.5772645, 0.50453169])],
    [[0.276, 0.3016, 0.18359999999999999],
     (0.1680159151193634, 0.18359999999999999, 0.25713740053050399),
     np.array([0.35505307, 0.47995567, 0.61088035])],
    [[0.3703, 0.4499, 0.1325],
     (0.10905701266948212, 0.13250000000000001, 0.052952878417426061),
     np.array([0.35179242, 0.42214077, 0.25258942])],
    [[0.2999, 0.2856, 0.2304],
     (0.24193613445378148, 0.23039999999999999, 0.33438655462184863),
     np.array([0.50809894, 0.50196494, 0.69048098])],
    [[0.2848, 0.3911, 0.4178],
     (0.30424300690360523, 0.4178, 0.34622597801073896),
     np.array([0.36240083, 0.74473539, 0.67467032])],
    [[0.5295, 0.4055, 0.3118],
     (0.40714697903822439, 0.31180000000000002, 0.049980271270036999),
     np.array([0.87944466, 0.48522956, 0.18327685])],
    [[0.2305, 0.2106, 0.11259999999999999],
     (0.1232397910731244, 0.11259999999999999, 0.29882307692307686),
     np.array([0.26605806, 0.35770363, 0.66743852])],
    [[0.5012, 0.3273, 0.1938],
     (0.29676920256645278, 0.1938, 0.10154812098991754),
     np.array([0.77782346, 0.32138719, 0.38060248])],
    [[0.3319, 0.2482, 0.0637],
     (0.085181426269137786, 0.063700000000000007, 0.1077664383561644),
     np.array([0.36729282, 0.22739265, 0.41412433])],
    [[0.3984, 0.5008, 0.4446],
     (0.35369137380191684, 0.4446, 0.089488178913738003),
     np.array([0.62266646, 0.7410742, 0.24626906])],
    [[0.4957, 0.4427, 0.4357],
     (0.48786196069573068, 0.43569999999999998, 0.060625976959566286),
     np.array([0.90369041, 0.63376348, 0.15395733])],
    [[0.2018, 0.1692, 0.0575],
     (0.068578605200945636, 0.057500000000000002, 0.21375591016548467),
     np.array([0.1384956, 0.24831912, 0.57681467])],
    [[0.3253, 0.5032, 0.2318],
     (0.14985003974562797, 0.23180000000000001, 0.079001788553259192),
     np.array([0.26252953, 0.58394952, 0.29070622])],
    [[0.5686, 0.3303, 0.1257],
     (0.21638819255222524, 0.12570000000000001, 0.038474931880109003),
     np.array([0.70564037, 0.19094729, 0.22335249])],
    [[0.4697, 0.4734, 0.5981000000000001],
     (0.59342536966624426, 0.59810000000000008, 0.071888234051542058),
     np.array([0.93451045, 0.77825294, 0.07655428])],
    [[0.4159, 0.2688, 0.2009],
     (0.3108419270833333, 0.2009, 0.2356539062500001),
     np.array([0.75715761, 0.32930283, 0.59045447])],
    [[0.2131, 0.3023, 0.193],
     (0.13605127356930202, 0.193, 0.30938736354614615),
     np.array([-0.48463915, 0.53412743, 0.66546058])],
    [[0.3469, 0.3608, 0.9131],
     (0.87792236696230597, 0.91310000000000002, 0.73974259977827039),
     np.array([0.96027764, 0.96170536, 0.95169688])],
    [[0.344, 0.3584, 0.5893999999999999],
     (0.56571874999999983, 0.58939999999999992, 0.48941249999999997),
     np.array([0.78565259, 0.79300245, 0.79387336])],
    [[0.3432, 0.3581, 0.3632],
     (0.34808779670483109, 0.36320000000000002, 0.30295403518570241),
     np.array([0.63023284, 0.63852418, 0.64028572])],
    [[0.3446, 0.3579, 0.19149999999999998],
     (0.18438362671137187, 0.19149999999999998, 0.15918203408773396),
     np.array([0.4732449, 0.47519512, 0.47670436])],
    [[0.3401, 0.3548, 0.0883],
     (0.084641572717023675, 0.088300000000000003, 0.075931031567080032),
     np.array([0.32315746, 0.32983556, 0.33640183])],
    [[0.3406, 0.3537, 0.0311],
     (0.029948148148148147, 0.031099999999999999, 0.026879474130619162),
     np.array([0.19104038, 0.19371002, 0.19903915])]]

# Same 24 patches with expected ACES RGB values (no transfer function applied).
ACES_COLORCHECKER_2005 = [
    [[0.4316, 0.3777, 0.1008],
     (0.11518474980142972, 0.1008, 0.050893725178713274),
     np.array([0.11758989, 0.08781098, 0.06184838])],
    [[0.4197, 0.3744, 0.34950000000000003],
     (0.39178725961538463, 0.34950000000000003, 0.19220633012820515),
     np.array([0.40073605, 0.31020146, 0.2334411])],
    [[0.276, 0.3016, 0.18359999999999999],
     (0.1680159151193634, 0.18359999999999999, 0.25713740053050399),
     np.array([0.17949613, 0.20101795, 0.31109218])],
    [[0.3703, 0.4499, 0.1325],
     (0.10905701266948212, 0.13250000000000001, 0.052952878417426061),
     np.array([0.1107181, 0.13503098, 0.06442476])],
    [[0.2999, 0.2856, 0.2304],
     (0.24193613445378148, 0.23039999999999999, 0.33438655462184863),
     np.array([0.2575148, 0.23804357, 0.40454743])],
    [[0.2848, 0.3911, 0.4178],
     (0.30424300690360523, 0.4178, 0.34622597801073896),
     np.array([0.31733562, 0.46758348, 0.41947022])],
    [[0.5295, 0.4055, 0.3118],
     (0.40714697903822439, 0.31180000000000002, 0.049980271270036999),
     np.array([0.41040872, 0.23293505, 0.06167114])],
    [[0.2305, 0.2106, 0.11259999999999999],
     (0.1232397910731244, 0.11259999999999999, 0.29882307692307686),
     np.array([0.13747056, 0.13033376, 0.36114764])],
    [[0.5012, 0.3273, 0.1938],
     (0.29676920256645278, 0.1938, 0.10154812098991754),
     np.array([0.30304559, 0.13139056, 0.12344791])],
    [[0.3319, 0.2482, 0.0637],
     (0.085181426269137786, 0.063700000000000007, 0.1077664383561644),
     np.array([0.09058405, 0.05847923, 0.13035265])],
    [[0.3984, 0.5008, 0.4446],
     (0.35369137380191684, 0.4446, 0.089488178913738003),
     np.array([0.3547791, 0.44849679, 0.10971221])],
    [[0.4957, 0.4427, 0.4357],
     (0.48786196069573068, 0.43569999999999998, 0.060625976959566286),
     np.array([0.49038927, 0.36515801, 0.07497681])],
    [[0.2018, 0.1692, 0.0575],
     (0.068578605200945636, 0.057500000000000002, 0.21375591016548467),
     np.array([0.07890084, 0.07117527, 0.25824906])],
    [[0.3253, 0.5032, 0.2318],
     (0.14985003974562797, 0.23180000000000001, 0.079001788553259192),
     np.array([0.15129818, 0.25515937, 0.09620886])],
    [[0.5686, 0.3303, 0.1257],
     (0.21638819255222524, 0.12570000000000001, 0.038474931880109003),
     np.array([0.21960818, 0.06985597, 0.04703204])],
    [[0.4697, 0.4734, 0.5981000000000001],
     (0.59342536966624426, 0.59810000000000008, 0.071888234051542058),
     np.array([0.5948559, 0.5382559, 0.08916818])],
    [[0.4159, 0.2688, 0.2009],
     (0.3108419270833333, 0.2009, 0.2356539062500001),
     np.array([0.32368864, 0.15049668, 0.28535138])],
    [[0.2131, 0.3023, 0.193],
     (0.13605127356930202, 0.193, 0.30938736354614615),
     np.array([0.14920707, 0.23648468, 0.37415686])],
    [[0.3469, 0.3608, 0.9131],
     (0.87792236696230597, 0.91310000000000002, 0.73974259977827039),
     np.array([0.90989008, 0.91268206, 0.89651699])],
    [[0.344, 0.3584, 0.5893999999999999],
     (0.56571874999999983, 0.58939999999999992, 0.48941249999999997),
     np.array([0.58690823, 0.59107342, 0.59307473])],
    [[0.3432, 0.3581, 0.3632],
     (0.34808779670483109, 0.36320000000000002, 0.30295403518570241),
     np.array([0.36120089, 0.36465935, 0.36711553])],
    [[0.3446, 0.3579, 0.19149999999999998],
     (0.18438362671137187, 0.19149999999999998, 0.15918203408773396),
     np.array([0.19128766, 0.19177359, 0.19289805])],
    [[0.3401, 0.3548, 0.0883],
     (0.084641572717023675, 0.088300000000000003, 0.075931031567080032),
     np.array([0.08793956, 0.08892476, 0.09200134])],
    [[0.3406, 0.3537, 0.0311],
     (0.029948148148148147, 0.031099999999999999, 0.026879474130619162),
     np.array([0.03111895, 0.03126787, 0.03256784])]]
def sRGB_TRANSFER_FUNCTION(x):
    """sRGB opto-electronic transfer function (linear -> encoded), IEC 61966-2-1."""
    return x * 12.92 if x <= 0.0031308 else 1.055 * (x ** (1 / 2.4)) - 0.055


def sRGB_INVERSE_TRANSFER_FUNCTION(x):
    """sRGB electro-optical transfer function (encoded -> linear), IEC 61966-2-1.

    BUGFIX: the decoding threshold applies to the *encoded* value and equals
    0.04045 (= 0.0031308 * 12.92).  The original reused the linear-domain
    threshold 0.0031308, which sent encoded values in (0.0031308, 0.04045]
    through the power branch and broke round-tripping near black.
    (Also converted from lambda assignments to defs per PEP 8.)
    """
    return x / 12.92 if x <= 0.04045 else ((x + 0.055) / 1.055) ** 2.4
class TestXYZ_to_RGB(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.XYZ_to_RGB` definition unit tests
    methods.
    """

    def test_XYZ_to_RGB(self):
        """
        Tests :func:`colour.models.rgb.XYZ_to_RGB` definition.
        """
        # sRGB case: source whitepoint (0.34567, 0.35850) -> sRGB whitepoint,
        # with the XYZ->sRGB matrix, Bradford adaptation and the OETF applied.
        for xyY, XYZ, RGB in sRGB_LINEAR_COLORCHECKER_2005:
            np.testing.assert_almost_equal(
                XYZ_to_RGB(
                    np.array(XYZ),
                    (0.34567, 0.35850),
                    (0.31271, 0.32902),
                    np.array(
                        [3.24100326, -1.53739899, -0.49861587,
                         -0.96922426, 1.87592999, 0.04155422,
                         0.05563942, -0.2040112, 1.05714897]),
                    'Bradford',
                    sRGB_TRANSFER_FUNCTION),
                RGB,
                decimal=7)
        # ACES case: linear output, default chromatic adaptation, no transfer
        # function argument.
        for xyY, XYZ, RGB in ACES_COLORCHECKER_2005:
            np.testing.assert_almost_equal(
                XYZ_to_RGB(
                    np.array(XYZ),
                    (0.34567, 0.35850),
                    (0.32168, 0.33767),
                    np.array(
                        [1.04981102e+00, 0.00000000e+00, -9.74845410e-05,
                         -4.95903023e-01, 1.37331305e+00, 9.82400365e-02,
                         0.00000000e+00, 0.00000000e+00, 9.91252022e-01]
                    )),
                RGB,
                decimal=7)
class TestRGB_to_XYZ(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.RGB_to_XYZ` definition unit tests
    methods.
    """

    def test_RGB_to_XYZ(self):
        """
        Tests :func:`colour.models.rgb.RGB_to_XYZ` definition.
        """
        # Inverse of the XYZ_to_RGB sRGB case: sRGB->XYZ matrix, Bradford
        # adaptation and the inverse (decoding) transfer function.
        for xyY, XYZ, RGB in sRGB_LINEAR_COLORCHECKER_2005:
            np.testing.assert_almost_equal(
                RGB_to_XYZ(
                    RGB,
                    (0.31271, 0.32902),
                    (0.34567, 0.35850),
                    np.array(
                        [0.41238656, 0.35759149, 0.18045049,
                         0.21263682, 0.71518298, 0.0721802,
                         0.01933062, 0.11919716, 0.95037259]),
                    'Bradford',
                    sRGB_INVERSE_TRANSFER_FUNCTION),
                np.array(XYZ),
                decimal=7)
        # ACES case: linear input, ACES RGB->XYZ matrix.
        for xyY, XYZ, RGB in ACES_COLORCHECKER_2005:
            np.testing.assert_almost_equal(
                RGB_to_XYZ(
                    RGB,
                    (0.32168, 0.33767),
                    (0.34567, 0.35850),
                    np.array(
                        [9.52552396e-01, 0.00000000e+00, 9.36786317e-05,
                         3.43966450e-01, 7.28166097e-01, -7.21325464e-02,
                         0.00000000e+00, 0.00000000e+00, 1.00882518e+00])),
                np.array(XYZ),
                decimal=7)
class TestRGB_to_RGB(unittest.TestCase):
    """
    Defines :func:`colour.models.rgb.RGB_to_RGB` definition unit tests
    methods.
    """

    def test_RGB_to_RGB(self):
        """
        Tests :func:`colour.models.rgb.RGB_to_RGB` definition.
        """
        aces_rgb_colourspace = RGB_COLOURSPACES.get('ACES RGB')
        sRGB_colourspace = RGB_COLOURSPACES.get('sRGB')
        # Round trip ACES RGB <-> sRGB with the default adaptation method...
        np.testing.assert_almost_equal(
            RGB_to_RGB((0.35521588, 0.41, 0.24177934),
                       aces_rgb_colourspace,
                       sRGB_colourspace),
            np.array([0.33658567, 0.44096335, 0.21509975]),
            decimal=7)
        np.testing.assert_almost_equal(
            RGB_to_RGB((0.33658567, 0.44096335, 0.21509975),
                       sRGB_colourspace,
                       aces_rgb_colourspace),
            np.array([0.35521588, 0.41, 0.24177934]),
            decimal=7)
        # ...and with explicit Bradford chromatic adaptation.
        np.testing.assert_almost_equal(
            RGB_to_RGB((0.35521588, 0.41, 0.24177934),
                       aces_rgb_colourspace,
                       sRGB_colourspace,
                       'Bradford'),
            np.array([0.33704409, 0.44133521, 0.21429761]),
            decimal=7)
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib unittest runner.
    unittest.main()
| StarcoderdataPython |
3320870 | <reponame>alexpulver/flask-webapi<gh_stars>0
from flask_restful import Resource
class Endpoint(Resource):
    """Minimal REST resource exposing GET and POST with canned responses."""

    def get(self):
        """Handle GET requests: confirmation string with HTTP 200."""
        return 'Got get', 200

    def post(self):
        """Handle POST requests: confirmation string with HTTP 201 (created)."""
        return 'Got post', 201
| StarcoderdataPython |
41815 | <reponame>chuanhao01/MSP_Learn_Python_Turtle
import turtle
# Tutorial script: draws a num_rows x num_cols grid of squares with turtle.
pen = turtle.Turtle()
pen.speed("slowest")
# Let's draw something a little more interesting, a 2d grid of squares
# How would you draw a grid of squares?
# For each row in the grid, draw a certain number of squares (number of columns)
square_width = 50
num_rows = 2
num_cols = 2
# Starting X and y coordinate of the turtle pen, so that the pen doesnt draw outside of window
start_x_pos = -(num_cols * square_width) / 2
start_y_pos = (num_rows * square_width) / 2
# use pen.up() to "lift the pen" and stop drawing when moving
pen.up()
# use pen.goto() to set turtle pen to be at that coordinate
pen.goto(start_x_pos, start_y_pos)
# We can use nested for loops, a for loop inside of another for loop
for i in range(num_rows):
    # for each row, we iterate through each column and draw squares
    for j in range(num_cols):
        # use pen.down() to "place the pen on the canvas" and start drawing when moving
        pen.down()
        # this loop draws the square
        for k in range(4):
            pen.forward(square_width)
            pen.right(90)
        pen.up()
        # move the turtle forward by one square to draw the next one
        pen.forward(square_width)
    # position the turtle to be below the previous row of squares to draw the next row
    pen.goto(start_x_pos, start_y_pos - ((i + 1) * square_width))
# Keep the drawing window open until it is clicked.
turtle.exitonclick()
1951487 | <reponame>Muix2015/Personal_Website<gh_stars>0
from django.db import models
# Create your models here.
class Rate(models.Model):
    """Interest-rate record with two rate fields, GJJ and SYDK.

    NOTE(review): presumably GJJ is the housing-fund rate and SYDK the
    commercial-loan rate (pinyin abbreviations) -- confirm with consumers.
    """
    # DecimalField avoids float rounding: up to 4 digits, 2 after the decimal
    # point (FloatField variants were previously tried and abandoned).
    GJJ = models.DecimalField(default=0, max_digits=4, decimal_places=2)
    SYDK = models.DecimalField(default=0, max_digits=4, decimal_places=2)
1673849 | #
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Union
import numpy as np
from rl_coach.agents.dqn_agent import DQNAgentParameters
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.core_types import EnvironmentSteps
from rl_coach.schedules import LinearSchedule
class DDQNAgentParameters(DQNAgentParameters):
    """DQN parameters specialized for Double DQN: slower target-network sync
    and a long, fine-grained epsilon-greedy decay."""
    def __init__(self):
        super().__init__()
        # Sync online -> target network weights every 30k environment steps.
        self.algorithm.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(30000)
        # Epsilon decays linearly from 1.0 to 0.01 over 1M steps; near-greedy
        # behaviour during evaluation.
        self.exploration.epsilon_schedule = LinearSchedule(1, 0.01, 1000000)
        self.exploration.evaluation_epsilon = 0.001

    @property
    def path(self):
        # Import path used by the framework to locate the matching agent class.
        return 'rl_coach.agents.ddqn_agent:DDQNAgent'
# Double DQN - https://arxiv.org/abs/1509.06461
class DDQNAgent(ValueOptimizationAgent):
    """Double DQN agent: the *online* network selects the next action while
    the *target* network evaluates it, reducing Q-value overestimation."""
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)

    def learn_from_batch(self, batch):
        """Compute Double-DQN TD targets for one batch and train the online network.

        Returns ``(total_loss, losses, unclipped_grads)`` from the network update.
        """
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
        # Online network picks the argmax action for each next state...
        selected_actions = np.argmax(self.networks['main'].online_network.predict(batch.next_states(network_keys)), 1)
        # ...while the target network supplies that action's Q-value (the
        # "double" decoupling); current-state Q-values come from the online net.
        q_st_plus_1, TD_targets = self.networks['main'].parallel_prediction([
            (self.networks['main'].target_network, batch.next_states(network_keys)),
            (self.networks['main'].online_network, batch.states(network_keys))
        ])
        # add Q value samples for logging
        self.q_values.add_sample(TD_targets)
        # initialize with the current prediction so that we will
        # only update the action that we have actually done in this transition
        TD_errors = []
        for i in range(batch.size):
            # Terminal transitions (game_overs == 1) drop the bootstrap term.
            new_target = batch.rewards()[i] + \
                         (1.0 - batch.game_overs()[i]) * self.ap.algorithm.discount * q_st_plus_1[i][selected_actions[i]]
            TD_errors.append(np.abs(new_target - TD_targets[i, batch.actions()[i]]))
            TD_targets[i, batch.actions()[i]] = new_target
        # update errors in prioritized replay buffer
        importance_weights = self.update_transition_priorities_and_get_weights(TD_errors, batch)
        result = self.networks['main'].train_and_sync_networks(batch.states(network_keys), TD_targets,
                                                               importance_weights=importance_weights)
        total_loss, losses, unclipped_grads = result[:3]
        return total_loss, losses, unclipped_grads
| StarcoderdataPython |
1816744 | <gh_stars>1-10
from .assets import AssetsInfoViewSet,AssetsViewSet,ServerCount,VmCount
from .tag import TagViewSet
from .idc import IDCViewSet
from .other import TestConnectApiView
| StarcoderdataPython |
9716890 | <filename>hafta_2/11.py
# Count 0..9 but stop early once the counter reaches 5.
# FIX: converted Python-2-only print statements to the parenthesized function
# form, which behaves identically for single arguments in Python 2 and 3.
i = 0
while i < 10:
    print(i)
    if i == 5:  # the loop stops when i equals 5
        print("Dongu Duruyor...")
        break
    i = i + 1
print("program sonlandi!")
| StarcoderdataPython |
65398 | <reponame>himicakumar/cs3240-labdemo
def greeting(msg):
    """Print *msg* to standard output (returns None)."""
    print(msg)
| StarcoderdataPython |
11304131 | <reponame>HITROS/omtb_ml
#!/usr/bin/env python
##############################################################################
# Copyright 2019 HITROS CO., LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# Author: <NAME>
import re
import sys
import os
import shutil
import cv2
import numpy as np
import random
import argparse
import datetime
from PIL import Image
from pycocotools import mask
from skimage import measure
from itertools import groupby
import json
import fnmatch
# Dataset-level metadata written into the COCO "info" section.
INFO = {
    "description": "Dataset",
    "url": "https://github.com/HITROS/omtb_ml",
    "version": "0.1.0",
    "year": 2019,
    "contributor": "<NAME>",
    "date_created": datetime.datetime.utcnow().isoformat(' ')
}

LICENSES = [
    {
        "id": 1,
        "name": "License",
        "url": "http://www.apache.org/licenses/LICENSE-2.0"
    }
]

# Active annotation categories.  An earlier set of gazebo categories
# (bookshelf, cabinet, cafe_table, ..., ycb_banana, ycb_potted_meat_can) was
# commented out in the original file; only the ycb wood-block set remains.
CATEGORIES = [
    {
        'id': 1,
        'name': 'ycb_base',
        'supercategory': 'ycb',
    },
    {
        'id': 2,
        'name': 'ycb_blue_wood_block',
        'supercategory': 'ycb',
    },
    {
        'id': 3,
        'name': 'ycb_green_wood_block',
        'supercategory': 'ycb',
    },
    {
        'id': 4,
        'name': 'ycb_orange_wood_block',
        'supercategory': 'ycb',
    },
    {
        'id': 5,
        'name': 'ycb_red_wood_block',
        'supercategory': 'ycb',
    },
    {
        'id': 6,
        'name': 'ycb_yellow_wood_block',
        'supercategory': 'ycb',
    },
]
def parse_args():
    """Parse command-line options.

    ``-dir`` is required in practice (prints help and exits with status 1
    when missing); ``name`` and ``log`` are positional arguments kept for
    interface compatibility.
    """
    parser = argparse.ArgumentParser(description='Save picture')
    parser.add_argument(
        '-dir',
        dest='dir',
        help='folder for saving picture (REQUIRED)',
        default=None,
        type=str)
    parser.add_argument(
        'name',
        help='name',
        default='',
        type=str)
    parser.add_argument(
        'log',
        help='log',
        default='',
        type=str)
    # FIX: parse argv once; the original called parser.parse_args() twice
    # (once for the -dir check and again for the return value).
    args = parser.parse_args()
    if args.dir is None:
        parser.print_help()
        sys.exit(1)
    return args
def create_image_info(image_id, file_name, image_size,
                      date_captured=None,
                      license_id=1, coco_url="", flickr_url=""):
    """Build a COCO "images" entry.

    *image_size* is a PIL-style ``(width, height)`` pair.  *date_captured*
    defaults to the current UTC time at CALL time.

    BUGFIX: the original used ``date_captured=datetime.datetime.utcnow()...``
    as the default, which is evaluated once at import, freezing the same
    timestamp into every record; a None sentinel restores per-call semantics.
    """
    if date_captured is None:
        date_captured = datetime.datetime.utcnow().isoformat(' ')
    image_info = {
        "id": image_id,
        "file_name": file_name,
        "width": image_size[0],
        "height": image_size[1],
        "date_captured": date_captured,
        "license": license_id,
        "coco_url": coco_url,
        "flickr_url": flickr_url
    }
    return image_info
def filter_for_png(root, files):
    """Return the root-joined paths of the entries in *files* that are PNGs."""
    pattern = fnmatch.translate('*.png')
    joined = (os.path.join(root, name) for name in files)
    return [path for path in joined if re.match(pattern, path)]
def filter_for_annotations(root, files, image_filename):
    """Return root-joined PNG paths whose basename starts with the stem of *image_filename*.

    The prefix check is a regex match against ``stem + '.*'``, so any
    basename beginning with the stem qualifies.
    """
    png_pattern = fnmatch.translate('*.png')
    stem = os.path.splitext(os.path.basename(image_filename))[0]
    prefix_pattern = stem + '.*'
    matches = []
    for name in files:
        path = os.path.join(root, name)
        if not re.match(png_pattern, path):
            continue
        if re.match(prefix_pattern, os.path.splitext(os.path.basename(path))[0]):
            matches.append(path)
    return matches
def resize_binary_mask(array, new_size):
    """Resize a 2-D binary mask to *new_size* (PIL (width, height)); returns a bool array."""
    # Scale 0/1 to 0/255 so PIL treats the data as a grayscale image.
    image = Image.fromarray(array.astype(np.uint8)*255)
    image = image.resize(new_size)
    # Any nonzero pixel after resampling becomes True again.
    return np.asarray(image).astype(np.bool_)
def close_contour(contour):
    """Ensure a polygon contour is closed by appending the first point if needed."""
    first, last = contour[0], contour[-1]
    if np.array_equal(first, last):
        return contour
    return np.vstack((contour, first))
def binary_mask_to_polygon(binary_mask, tolerance=0):
    """Converts a binary mask to COCO polygon representation

    Args:
        binary_mask: a 2D binary numpy array where '1's represent the object
        tolerance: Maximum distance from original points of polygon to approximated
            polygonal chain. If tolerance is 0, the original coordinate array is returned.
    """
    polygons = []
    # pad mask to close contours of shapes which start and end at an edge
    padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)
    contours = measure.find_contours(padded_binary_mask, 0.5)
    # Undo the 1-pixel padding offset on every contour coordinate.
    contours = np.subtract(contours, 1)
    for contour in contours:
        contour = close_contour(contour)
        contour = measure.approximate_polygon(contour, tolerance)
        if len(contour) < 3:
            # Fewer than 3 vertices cannot form a polygon -- drop it.
            continue
        # find_contours yields (row, col); COCO expects (x, y), so flip columns.
        contour = np.flip(contour, axis=1)
        segmentation = contour.ravel().tolist()
        # after padding and subtracting 1 we may get -0.5 points in our segmentation
        segmentation = [0 if i < 0 else i for i in segmentation]
        polygons.append(segmentation)
    return polygons
def binary_mask_to_rle(binary_mask):
    """Run-length encode a binary mask in column-major (Fortran) order.

    COCO RLE convention: counts alternate starting with the number of zeros,
    so a leading 0 is emitted when the very first pixel is foreground.
    """
    counts = []
    flat = binary_mask.ravel(order='F')
    for index, (pixel, run) in enumerate(groupby(flat)):
        if index == 0 and pixel == 1:
            counts.append(0)
        counts.append(sum(1 for _ in run))
    return {'counts': counts, 'size': list(binary_mask.shape)}
def create_annotation_info(annotation_id, image_id, category_info, binary_mask,
                           image_size=None, tolerance=2, bounding_box=None):
    """Build a COCO "annotations" entry from a binary mask.

    Returns None for empty masks, or for non-crowd masks that yield no
    polygon.  Crowd regions are stored as RLE, others as polygon lists.
    Uses pycocotools ``mask`` for encoding, area and bbox computation.
    """
    if image_size is not None:
        binary_mask = resize_binary_mask(binary_mask, image_size)
    binary_mask_encoded = mask.encode(np.asfortranarray(binary_mask.astype(np.uint8)))
    area = mask.area(binary_mask_encoded)
    if area < 1:
        # Nothing segmented -- skip this mask entirely.
        return None
    if bounding_box is None:
        bounding_box = mask.toBbox(binary_mask_encoded)
    if category_info["is_crowd"]:
        is_crowd = 1
        segmentation = binary_mask_to_rle(binary_mask)
    else:
        is_crowd = 0
        segmentation = binary_mask_to_polygon(binary_mask, tolerance)
        if not segmentation:
            return None
    annotation_info = {
        "id": annotation_id,
        "image_id": image_id,
        "category_id": category_info["id"],
        "iscrowd": is_crowd,
        "area": area.tolist(),
        "bbox": bounding_box.tolist(),
        "segmentation": segmentation,
        "width": binary_mask.shape[1],
        "height": binary_mask.shape[0],
    }
    return annotation_info
def create(my_dir, dir_mask, dir_img, json_name):
    """Scan mask images in *dir_mask* and write a COCO-style <json_name>.json into *my_dir*.

    Mask filenames are expected as '<imageid>_<categoryname>.png'; the
    matching source image name is '<imageid>.png' (only the basename is
    recorded -- *dir_img* itself is not read).
    """
    coco_output = {
        "info": INFO,
        "licenses": LICENSES,
        "categories": CATEGORIES,
        "images": [],
        "annotations": []
    }
    image_id = 1
    segmentation_id = 1
    for _, _, maskfiles in os.walk(dir_mask):
        # go through each image
        for mask_filename in maskfiles:
            image_filename = mask_filename.split('_')[0] + '.png'
            image = Image.open(os.path.join(dir_mask, mask_filename))
            image_info = create_image_info(
                image_id, os.path.basename(image_filename), image.size)
            coco_output["images"].append(image_info)
            # Category id is recovered from the category name embedded in the
            # mask filename.
            class_id = [x['id'] for x in CATEGORIES if x['name'] in mask_filename][0]
            category_info = {'id': class_id, 'is_crowd': 'crowd' in image_filename}
            # Grayscale-load the mask as a uint8 array for annotation building.
            binary_mask = np.asarray(Image.open(os.path.join(dir_mask, mask_filename))
                                     .convert('L')).astype(np.uint8)
            annotation_info = create_annotation_info(
                segmentation_id, image_id, category_info, binary_mask,
                image.size, tolerance=2)
            if annotation_info is not None:
                coco_output["annotations"].append(annotation_info)
                segmentation_id = segmentation_id + 1
            image_id = image_id + 1
    savename = '{}/'+json_name+'.json'
    with open(savename.format(my_dir), 'w') as output_json_file:
        json.dump(coco_output, output_json_file, indent=4)
def main(arg):
    """Split per-class image/mask pairs into train/val sets (~75/25) and emit COCO JSON.

    Expects <dir>/img/<class>/ and <dir>/mask/<class>/ with identically named
    .png files; copies them into <dir>/{train,val}/{image,mask} with
    sequential ids, then writes train.json and val.json via create().
    NOTE: this file is Python 2 (print statements).
    """
    my_dir = arg.dir
    imgDir = my_dir+r'/img'
    maskDir = my_dir+r'/mask'
    trainDir = my_dir+r'/train'
    valDir = my_dir+r'/val'
    trainDir_image = my_dir+r'/train/image'
    trainDir_mask = my_dir+r'/train/mask'
    valDir_image = my_dir + r'/val/image'
    valDir_mask = my_dir+r'/val/mask'
    # Create the output directory tree if missing (no exist_ok in Python 2).
    exist_trainDir = os.path.exists(trainDir)
    exist_valDir = os.path.exists(valDir)
    if not exist_trainDir:
        os.makedirs(trainDir)
    if not exist_valDir:
        os.makedirs(valDir)
    exist_trainDir_image = os.path.exists(trainDir_image)
    exist_trainDir_mask = os.path.exists(trainDir_mask)
    if not exist_trainDir_image:
        os.makedirs(trainDir_image)
    if not exist_trainDir_mask:
        os.makedirs(trainDir_mask)
    exist_valDir_image = os.path.exists(valDir_image)
    exist_valDir_mask = os.path.exists(valDir_mask)
    if not exist_valDir_image:
        os.makedirs(valDir_image)
    if not exist_valDir_mask:
        os.makedirs(valDir_mask)
    # Global running id so every copied sample gets a unique numeric name.
    imgnum = 1
    for parent, dirnames, _ in os.walk(imgDir):
        for dirname in dirnames:
            classnum = dirname
            # print dirname
            print classnum
            dirImage = parent + os.sep + dirname
            dirMask = maskDir + os.sep + classnum
            for _, _, filenames in os.walk(dirImage):
                for filename in filenames:
                    if filename.endswith('.png'):
                        imageName = str(imgnum) + '.png'
                        maskName = str(imgnum) + '_' + classnum + '.png'
                        # ~75% of samples go to train, the rest to val.
                        if random.uniform(0, 1) < 0.75:
                            shutil.copyfile(os.path.join(dirImage, filename), os.path.join(trainDir_image, imageName))
                            shutil.copyfile(os.path.join(dirMask, filename), os.path.join(trainDir_mask, maskName))
                        else:
                            shutil.copyfile(os.path.join(dirImage, filename), os.path.join(valDir_image, imageName))
                            shutil.copyfile(os.path.join(dirMask, filename), os.path.join(valDir_mask, maskName))
                        imgnum += 1
    create(my_dir, trainDir_mask, trainDir_image, 'train')
    create(my_dir, valDir_mask, valDir_image, 'val')
    print 'all done!!!'
if __name__ == '__main__':
    # Parse CLI arguments and run the dataset build.
    arg = parse_args()
    main(arg)
| StarcoderdataPython |
1762190 | <gh_stars>10-100
import random
from queue import *
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
def expo(a, b):
    """Exponentiation by squaring: a**b for b > 0, and 1 when b <= 0."""
    result = 1
    base = a
    while b > 0:
        if b & 1:
            result = result * base
        base = base * base
        b >>= 1
    return result
# primes[i] holds a prime factor of i; primes[i] == i exactly when i is prime.
primes = [0] * 100000


def sieve():
    """Populate the ``primes`` table for 0 <= i < 100000.

    Fill order matches the original: evens get 2, then for each odd prime p
    the odd multiples from p*p (step 2p) get p, overwriting earlier entries.
    """
    primes[1] = 1
    primes[2] = 2
    for even in range(4, 100000, 2):
        primes[even] = 2
    for p in range(3, 100000, 2):
        if primes[p] == 0:
            primes[p] = p
            for multiple in range(p * p, 100000, 2 * p):
                primes[multiple] = p
def rabin_miller(p):
    """Probabilistic Miller-Rabin primality test.

    Below 100000 the answer is exact via the sieve table (sieve() must have
    been run).  Otherwise 5 random witness rounds are used: a prime is never
    rejected, and a composite survives with probability at most 4**-5.
    """
    if(p<100000):
        return primes[p]==p
    if(p%2==0):
        return False
    # Write p-1 = s * 2^k with s odd (k is tracked implicitly by re-doubling temp).
    s=p-1
    while(s%2==0):
        s>>=1
    for i in range(5):
        # Random base a in [1, p-1].
        a=random.randrange(p-1)+1
        temp=s
        mod=pow(a,temp,p)
        # Repeatedly square, stopping on a +/-1 residue or after k squarings.
        while(temp!=p-1 and mod!=1 and mod!=p-1):
            mod=(mod*mod)%p
            temp=temp*2
        # Composite witness: never reached -1 and the exponent is still even.
        if(mod!=p-1 and temp%2==0):
            return False
    return True
def brent(N):
    """Brent's variant of Pollard's rho: return a nontrivial factor of N.

    Trivial cases: even N -> 2; N < 100000 -> sieve-table factor.  The
    returned factor is not necessarily prime, and may equal N on an unlucky
    random run (callers retry in that case).
    """
    if(N%2==0):
        return 2
    if(N<100000):
        return primes[N]
    # Random polynomial x -> x^2 + c (mod N), random start y, random batch size m.
    y,c,m = random.randint(1, N-1),random.randint(1, N-1),random.randint(1, N-1)
    g,r,q = 1,1,1
    while g==1:
        x=y
        for i in range(r):
            y=((y*y)%N+c)%N
        k=0
        while(k<r and g==1):
            ys=y
            for i in range(min(m,r-k)):
                y=((y*y)%N+c)%N
                # Accumulate |x - y| products so gcd is taken in batches
                # (Brent's speedup over per-step gcd).
                q=q*(abs(x-y))%N
            g=gcd(q,N)
            k=k+m
        r=r*2
    if g==N:
        # The batched gcd overshot (all factors collapsed); back up to ys and
        # retry one step at a time.
        while True:
            ys=((ys*ys)%N+c)%N
            g=gcd(abs(x-ys),N)
            if g>1:
                break
    return g
def factor(n):
    """Return the prime factors of n (with multiplicity, in discovery order)."""
    pending = Queue()
    found = []
    pending.put(n)
    while not pending.empty():
        value = pending.get()
        if rabin_miller(value):
            found.append(value)
            continue
        divisor = brent(value)
        if divisor == value:
            # Unlucky rho run produced a trivial factor: retry this composite.
            pending.put(value)
        else:
            pending.put(divisor)
            pending.put(value // divisor)
    return found
if __name__ == "__main__":
    sieve()
    # For each query n, factor it and combine a multiplicative function over
    # its prime powers.
    t=int(input())
    for test in range(t):
        n=int(input())
        if(n==1):
            print ("Case %s: 1"%(test+1))
            continue
        L=factor(n)
        L.sort()
        i=0
        ans=1
        while(i<len(L)):
            # cnt = multiplicity e of the prime p = L[i]; pk[j] = p**j.
            cnt=L.count(L[i])
            pk=[0]*(cnt+1)
            pk[0]=1
            for j in range(cnt):
                pk[j+1]=pk[j]*L[i]
            # Per prime power p^e the factor is sum_{j=0..e} (2e+1-2j) * p^j
            # (presumably the sum of gcd(a,b) over pairs with lcm(a,b) = p^e
            # -- confirm against the problem statement).
            temp=0
            cnt+=1
            val=cnt*2-1
            for j in range(cnt):
                temp+=val*pk[j]
                val-=2
            ans*=temp
            # Skip past all copies of this prime in the sorted factor list.
            i+=cnt-1
        print ("Case %s: %s"%(test+1,ans))
1948417 | <filename>Ano_1/LabI/Projeto Final - Moura/repositoryContent/ImageEditor/textEditor.py
from PIL import Image, ImageDraw, ImageFont
from PIL import ExifTags
import math
import sys
import ImageEditor.text_write_styles
#coding: utf-8
#text split----------------------------------------------------------------------------------------------------
def text_split(text):
    """Split *text* on single spaces into two roughly equal halves.

    The first half receives the words up to and including the middle index;
    each half keeps a trailing space after every word (matching the way the
    lines are later measured and drawn).
    """
    words = text.split(" ")
    midpoint = len(words) // 2
    first = "".join(w + " " for w in words[:midpoint + 1])
    second = "".join(w + " " for w in words[midpoint + 1:])
    return first, second
#line redimention----------------------------------------------------------------------------------------------
def line_redm(line, fnt, f, letter_type):
    """Shrink font size *f* until *line* rendered in *letter_type* fits
    590 px, and return the resulting size as an int.

    The size is reduced by 5% per step; if the line already fits, *f* is
    returned unchanged (as int).  590 px presumably matches the drawable
    width of the target photo -- TODO confirm against the canvas size.
    """
    current_width, _unused_height = fnt.getsize(line)
    size = f
    while current_width > 590:
        size = size * 0.95
        fnt = ImageFont.truetype(letter_type, int(size))
        current_width, _unused_height = fnt.getsize(line)
    return int(size)
#text redimention, borders, write-------------------------------------------------------------------------------
def text_editor(im, text, pos, style):
    """Overlay *text* onto PIL image *im* and return the modified image.

    pos   : "top" / "center" / "mix" (split across top and bottom);
            anything else falls back to "bottom".
    style : "meme" -> large outlined caption via text_write_styles.meme;
            any other value is coerced to "solid snap" and drawn via
            text_write_styles.snap with a banner whose transparency is
            `factor`.
    Text wider than 590 px is split into two lines with text_split() and
    each line is shrunk independently with line_redm().
    """
    width, height = im.size
    margin=25
    # text position in height: pick the baseline y-coordinate from *pos*
    pos = pos.lower()
    if(pos == "center"):
        bottomText = 260 #height//2 + 40
    elif(pos == "top"):
        bottomText = margin + 70
    elif(pos == "mix"):
        bottomText = margin + 60
    else:
        pos="bottom"
        bottomText = height-25 #bottom by default
    spacing=0 #space between lines
    # get a font: meme uses a big Anton face, everything else Ubuntu
    if(style=="meme"):
        f=80
        letter_type = "ImageEditor/fonts/Anton-Regular.ttf"  # alternatives: arial, impact, candara, calibri, comic
    else:
        f=40
        letter_type = "ImageEditor/fonts/Ubuntu-Regular.ttf"
        # factor is the band transparency used by the snap writer; unknown
        # styles are coerced to "solid snap" with a half-transparent band
        if(style=="solid snap"):
            factor = 0
        else:
            style = "solid snap"
            factor = 0.5
    fnt = ImageFont.truetype(letter_type, f)
    #convert to lowercase
    text = text.lower()
    text_width, text_height = fnt.getsize(text)
    line1, line2 = "", ""
    #text bigger than the foto: split into two lines ------------------------
    if(text_width>590):
        line1, line2 = text_split(text)
        line1_width, line1_height = fnt.getsize(line1)
        #LINE1: shrink to fit, reposition, then draw -------------------------
        f1 = line_redm(line1, fnt, f, letter_type)
        fnt1 = ImageFont.truetype(letter_type, f1)
        line1_width, line1_height = fnt1.getsize(line1)
        #ascend line1 so both lines remain on the image
        if(pos=="bottom"):
            bottomText = bottomText - line1_height - 20#+ (line1_height//2)
        if(pos=="center"):
            bottomText = bottomText - line1_height + 40# - (line1_height//2)
        #black border & write
        if(style=="meme"):
            im = ImageEditor.text_write_styles.meme(im, margin, bottomText+(line1_height//2), fnt1, line1)
        #black line and write
        else:
            im = ImageEditor.text_write_styles.snap(im, margin, bottomText-50, fnt1, line1, factor)
        #LINE2: same treatment -----------------------------------------------
        f2 = line_redm(line2, fnt, f, letter_type)
        fnt2 = ImageFont.truetype(letter_type, f2)
        line2_width, line2_height = fnt2.getsize(line2)
        #black border & write
        if(style=="meme"):
            if(pos=="mix"):
                spacing = height - (bottomText+line2_height)#(line2_height + margin)
            elif(pos=="bottom"):
                spacing = 20
            else:
                spacing = line1_height//2
            im = ImageEditor.text_write_styles.meme(im, margin, bottomText+line2_height+spacing, fnt2, line2)
        #black line and write
        else:
            position = bottomText-50
            if(pos=="mix"):
                spacing = height-margin-40-position
            else:
                spacing = line2_height + 4  # +4 keeps the two bands adjacent
            im = ImageEditor.text_write_styles.snap(im, margin, position+spacing, fnt2, line2, factor)
    #text fits the foto ------------------------------------------------------
    else:
        if(pos=="mix"):
            # "mix" always draws two lines: one near the top, one at the bottom
            line1, line2 = text_split(text)
            line1_width, line1_height = fnt.getsize(line1)
            line2_width, line2_height = fnt.getsize(line2)
            if(style=="meme"):
                spacing = height - margin
                #black border & write
                im = ImageEditor.text_write_styles.meme(im, margin, bottomText+(line1_height//2), fnt, line1)
                im = ImageEditor.text_write_styles.meme(im, margin, spacing, fnt, line2)
            else:
                spacing = height - (line2_height + margin)
                im = ImageEditor.text_write_styles.snap(im, margin, bottomText, fnt, line1, factor)
                im = ImageEditor.text_write_styles.snap(im, margin, spacing, fnt, line2, factor)
        else:
            if(style=="meme"):
                #black border & write
                im = ImageEditor.text_write_styles.meme(im, margin, bottomText, fnt, text)
            else:
                im = ImageEditor.text_write_styles.snap(im, margin, bottomText-50, fnt, text, factor)
    return im
| StarcoderdataPython |
6688612 | <reponame>brown-ccv/workshop-python-2020
# okpy-style grader specification for exercise 8.1.  Each case checks that the
# student defined res1_int / res2_int / res3_int and that each holds double
# its input (1 -> 2, 9 -> 18, 19 -> 38).
test = {
  'name': '8.1',
  'suites': [
    {
      'cases': [
        {
          'code': r"""
          >>> # It looks like your variable is not named correctly.
          >>> # Maybe there's a typo?
          >>> 'res1_int' in vars()
          True
          """
        },
        {
          'code': r"""
          >>> # It looks like your variable is not named correctly.
          >>> # Maybe there's a typo?
          >>> 'res2_int' in vars()
          True
          """
        },
        {
          'code': r"""
          >>> # It looks like your variable is not named correctly.
          >>> # Maybe there's a typo?
          >>> 'res3_int' in vars()
          True
          """
        },
        {
          'code': r"""
          >>> # Your function gives the incorrect answer for
          >>> # input = 1.
          >>> res1_int == 2
          True
          """
        },
        {
          'code': r"""
          >>> # Your function gives the incorrect answer for
          >>> # input = 9.
          >>> res2_int == 18
          True
          """
        },
        {
          'code': r"""
          >>> # Your function gives the incorrect answer for
          >>> # input = 19.
          >>> res3_int == 38
          True
          """
        }
      ]
    }
  ]
}
6656904 | <reponame>mehrdad1373pedramfar/restfulpy<filename>restfulpy/tests/test_server_timestamp.py
from bddrest import response, status, when
from nanohttp import json, Controller, settings
from restfulpy.testing import ApplicableTestCase
class Root(Controller):
    """Minimal nanohttp controller used as the application under test."""
    @json
    def index(self):
        return 'index'
class TestServerTimestamp(ApplicableTestCase):
    """Verify the X-Server-Timestamp response header follows the `timestamp`
    setting: present when true, absent when false."""
    __controller_factory__ = Root

    @classmethod
    def configure_application(self):  # NOTE(review): conventionally named `cls`
        super().configure_application()
        settings.merge('timestamp: true')

    def test_server_timestamp_header(self):
        with self.given('Geting server\'s timestamp'):
            assert status == 200
            assert 'X-Server-Timestamp' in response.headers

            # flip the setting and replay the request within the same story
            settings.merge('timestamp: false')
            when('With default configuration')
            assert status == 200
            assert 'X-Server-Timestamp' not in response.headers
| StarcoderdataPython |
1632604 | from django.template import Library
from _1327.information_pages.models import InformationDocument
# Template-tag registry for this module's filters.
register = Library()
@register.filter
def can_user_see_author(document, user):
    """Template filter: True iff *user* may see *document*'s author,
    according to the document's `show_author_to` visibility setting."""
    visibility = document.show_author_to
    if visibility == InformationDocument.SHOW_AUTHOR_TO_EVERYONE:
        return True
    if visibility == InformationDocument.SHOW_AUTHOR_TO_LOGGED_IN_USERS:
        return user.is_authenticated and not user.is_anonymous
    return False
| StarcoderdataPython |
5001127 | import psutil
import platform
import datetime
import logging
from kalliope.core.NeuronModule import NeuronModule, InvalidParameterException
# Module-level logger shared with the rest of Kalliope.
logging.basicConfig()
logger = logging.getLogger("kalliope")
class System_status(NeuronModule):
    """Kalliope neuron reporting host health: boot time, OS identity and
    CPU / memory / disk usage percentages, gathered via platform and psutil,
    then passed to self.say() for rendering."""

    def __init__(self, **kwargs):
        super(System_status, self).__init__(**kwargs)
        response = {}
        # platform.uname() -> (system, node, release, version, machine, processor);
        # NOTE(review): the name `version` here actually receives uname's
        # *release* field, and `os` shadows a common module name.
        os, name, version, _, _, _ = platform.uname()
        boot_time = datetime.datetime.fromtimestamp(psutil.boot_time())
        response['running_since'] = boot_time.strftime("%A %d. %B %Y")
        response['os'] = os
        response['os_version'] = version.split('-')[0]  # strip suffix e.g. '-generic'
        response['system_name'] = name
        response['system_nb_cores'] = psutil.cpu_count()
        response['cpu'] = psutil.cpu_percent()
        response['memory'] = psutil.virtual_memory()[2]  # index 2 = percent used
        response['disk'] = psutil.disk_usage('/')[3]     # index 3 = percent used
        logger.debug(response)
        self.say(response)
| StarcoderdataPython |
8044404 | #!/home/byrone/Desktop/Project/Album/virtual/bin/python3.6
from django.core import management
if __name__ == "__main__":
    # Delegate to Django's command-line interface (runserver, migrate, ...).
    management.execute_from_command_line()
| StarcoderdataPython |
94719 | <reponame>stanford-oval/trade-dst
import argparse
import nltk
from nltk.corpus import stopwords
# nltk.download('stopwords')
from collections import Counter
import numpy as np
from matplotlib import pyplot as plt
import json
# Command-line interface -----------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--input_file')        # TSV lines: "<dialogue>/<turn>\t<utterance>"
parser.add_argument('--prediction_file')   # JSON with per-dialogue turn predictions
parser.add_argument('--do_lower_case', action='store_true')
parser.add_argument('--exclude_stop_words', action='store_true')
parser.add_argument('--ratio', type=float, default=0.02)  # fraction of lines analysed
parser.add_argument('--num_bins', type=int, default=10)   # tf-idf histogram bins
args = parser.parse_args()

with open(args.input_file, 'r') as f:
    data = f.readlines()

stop_words = set(stopwords.words('english'))  # requires the nltk stopwords corpus
def pre_process_sent(sent):
    """Tokenise one utterance according to the command-line flags.

    Lower-cases when --do_lower_case, strips double quotes, drops
    single-character punctuation tokens and tokens of length <= 1, and
    optionally removes English stop words (--exclude_stop_words).
    Uses the module-level `args` and `stop_words`.
    """
    if args.do_lower_case:
        sent = sent.lower()
    # BUG FIX: str.replace returns a new string; the original discarded the
    # result, so double quotes were never actually removed.
    sent = sent.replace('"', '')
    tokens = sent.split()
    symbols = "!\"#$%&()*+-./:;<=>?@[\]^_`{|}~\n"
    new_sent = []
    for token in tokens:
        # NOTE: `token not in symbols` is a substring test against the symbols
        # string, so it effectively filters only short punctuation tokens.
        if token not in symbols and len(token) > 1:
            new_sent.append(token)
    if args.exclude_stop_words:
        return [token for token in new_sent if token not in stop_words]
    return new_sent
# Tokenise the first `ratio` fraction of the corpus; record, per line, the
# (dialogue name, turn id) pair and the cleaned token list.
N = len(data)
doc_turns = []
sentences = []
for val in data[:max(1, int(N * args.ratio))]:
    name, sent = val.split('\t', 1)
    doc_name, turn = name.rsplit('/', 1)
    if doc_name.startswith('/'):
        doc_name = doc_name[1:]
    doc_turns.append([doc_name, turn])
    final_sent = pre_process_sent(sent)
    sentences.append(final_sent)
    pass
# Document frequency: for every token, the set of line indices containing it.
df = {}
for i, (k, t) in enumerate(doc_turns):
    tokens = sentences[i]
    for w in tokens:
        if w not in df.keys():
            df[w] = {i}
        else:
            df[w].add(i)
# tf-idf per (dialogue, turn, token); idf smoothed with +1 on the doc count.
tf_idf = {}
for i, (k, t) in enumerate(doc_turns):
    tokens = sentences[i]
    counter = Counter(tokens)
    for token in np.unique(tokens):
        termf = counter[token] / len(tokens)
        docf = len(df[token])
        idf = np.log(N/(docf+1))
        tf_idf[k, t, token] = termf*idf
    pass
# Model predictions keyed by dialogue then turn; each turn carries
# "turn_belief" (gold) and "pred_bs_ptr" (predicted) slot lists.
with open(args.prediction_file) as fp:
    pred_data = json.load(fp)
def remove_none_slots(belief):
    """Yield the belief entries ("domain-slot-value") whose value is not
    the placeholder 'none'."""
    for entry in belief:
        _domain, _slot_name, slot_value = entry.split('-')
        if slot_value != 'none':
            yield entry
def get_joint_accuracy(turn):
    """Return 1.0 when the predicted belief set (ignoring 'none' slots)
    exactly matches the gold set, else 0.0."""
    gold = set(remove_none_slots(turn['turn_belief']))
    predicted = set(remove_none_slots(turn['pred_bs_ptr']))
    return float(gold == predicted)
# Joint accuracy per (dialogue, turn) for every dialogue seen in the sample.
accuracies = {}
for i, (k, t) in enumerate(doc_turns):
    for turn in pred_data[k].keys():
        acc = get_joint_accuracy(pred_data[k][turn])
        accuracies[k, turn] = acc
    pass
keys = set([x for x in accuracies.keys()])      # NOTE(review): unused below
doc_keys = set([tuple(x) for x in doc_turns])   # NOTE(review): unused below
# Mean tf-idf of each turn's tokens.
new_tf_idf = {}
for i, (k, t) in enumerate(doc_turns):
    new_tf_idf[k, t] = []
    tokens = sentences[i]
    for word in tokens:
        new_tf_idf[k, t].append(tf_idf[k, t, word])
for i, (k, t) in enumerate(doc_turns):
    new_tf_idf[k, t] = sum(new_tf_idf[k, t]) / len(new_tf_idf[k, t])
# Bucket turns by mean tf-idf and plot mean accuracy per bucket.
min_val, max_val = min(new_tf_idf.values()), max(new_tf_idf.values())
num_bins = args.num_bins
step = (max_val - min_val) / num_bins
# NOTE(review): bins start at 0 rather than min_val, so values outside
# [0, max_val - min_val) silently fall into no bucket -- confirm intended.
bins = [(i*step, (i+1)*step) for i in range(num_bins)]
bins_acc = {}
for i, bin in enumerate(bins):
    bins_acc[i] = []
    for j, (k, t) in enumerate(doc_turns):
        try:
            # accuracies is keyed by the prediction file's turn ids; a
            # mismatch with t raises KeyError, swallowed and logged below
            if new_tf_idf[k, t] >= bin[0] and new_tf_idf[k, t] < bin[1]:
                bins_acc[i].append(accuracies[k, t])
        except:
            print('*******')
for i in range(len(bins)):
    if len(bins_acc[i]) == 0:
        bins_acc[i] = 0.0
    else:
        bins_acc[i] = sum(bins_acc[i]) / len(bins_acc[i])
plt.plot(list(bins_acc.values()))
plt.show()
3577097 | """Peewee migrations -- 001_create_prefix_table.py."""
import peewee as pw
class Prefix(pw.Model):
    # Per-guild command prefix; the guild id doubles as the primary key.
    guild_id = pw.BigIntegerField(primary_key=True)
    prefix = pw.CharField(max_length=25)
def migrate(migrator, database, fake=False, **kwargs):
    """Forward migration: create the table backing the Prefix model."""
    migrator.create_model(Prefix)
def rollback(migrator, database, fake=False, **kwargs):
    """Rollback: drop the table created by migrate()."""
    migrator.drop_table('prefix')
| StarcoderdataPython |
164089 | <reponame>mariotaku/nanovg
import ctypes
################################################################################
class _NVGrgba4(ctypes.Structure): # Internal
    # Named-component view of an NVG color: four floats r, g, b, a.
    _fields_ = [('r', ctypes.c_float),
                ('g', ctypes.c_float),
                ('b', ctypes.c_float),
                ('a', ctypes.c_float)]
class _NVGrgbaA(ctypes.Structure): # Internal
    # Array view of the same color: a single float[4] field.
    _fields_ = [('rgba', ctypes.c_float * 4)]
# See https://docs.python.org/3/library/ctypes.html#ctypes.Structure._anonymous_
class NVGcolor(ctypes.Union):
    # Union of the two views above: a color is reachable both as .r/.g/.b/.a
    # and as the .rgba float[4] array (members promoted via _anonymous_).
    _anonymous_ = ('rgbaA', 'rgba4')
    _fields_ = [('rgbaA', _NVGrgba4),
                ('rgba4', _NVGrgbaA)]
class NVGpaint(ctypes.Structure):
    # Mirrors NanoVG's NVGpaint struct (gradient / image paint description).
    # Field order and types must match the C ABI exactly.
    _fields_ = [("xform", ctypes.c_float * 6),
                ("extent", ctypes.c_float * 2),
                ("radius", ctypes.c_float),
                ("feather", ctypes.c_float),
                ("innerColor", NVGcolor),
                ("outerColor", NVGcolor),
                ("image", ctypes.c_int32)]
class NVGcompositeOperationState(ctypes.Structure):
    # Blend factors describing one composite operation (cf. nanovg.h).
    _fields_ = [("srcRGB", ctypes.c_int32),
                ("dstRGB", ctypes.c_int32),
                ("srcAlpha", ctypes.c_int32),
                ("dstAlpha", ctypes.c_int32)]
class NVGglyphPosition(ctypes.Structure):
    # Position of a single glyph within a laid-out string.
    _fields_ = [("str", ctypes.c_void_p),  # pointer into the source text
                ("x", ctypes.c_float),
                ("minx", ctypes.c_float),
                ("maxx", ctypes.c_float)]
class NVGtextRow(ctypes.Structure):
    # One row produced by NanoVG's text line-breaking.
    _fields_ = [("start", ctypes.c_void_p),
                ("end", ctypes.c_void_p),
                ("next", ctypes.c_void_p),  # start of the following row
                ("width", ctypes.c_float),
                ("minx", ctypes.c_float),
                ("maxx", ctypes.c_float)]
################################################################################
# Constant values mirrored from the corresponding enums in nanovg.h; the
# numeric values must stay in sync with the C library.
# NVGwinding
NVG_CCW = 1
NVG_CW = 2
# NVGsolidity
NVG_SOLID = 1
NVG_HOLE = 2
# NVGlineCap
NVG_BUTT = 0
NVG_ROUND = 1
NVG_SQUARE = 2
NVG_BEVEL = 3
NVG_MITER = 4
# NVGalign
# Horizontal align
NVG_ALIGN_LEFT = 1
NVG_ALIGN_CENTER = 2
NVG_ALIGN_RIGHT = 4
# Vertical align
NVG_ALIGN_TOP = 8
NVG_ALIGN_MIDDLE = 16
NVG_ALIGN_BOTTOM = 32
NVG_ALIGN_BASELINE = 64
# NVGblendFactor
NVG_ZERO = 1 << 0
NVG_ONE = 1 << 1
NVG_SRC_COLOR = 1 << 2
NVG_ONE_MINUS_SRC_COLOR = 1 << 3
NVG_DST_COLOR = 1 << 4
NVG_ONE_MINUS_DST_COLOR = 1 << 5
NVG_SRC_ALPHA = 1 << 6
NVG_ONE_MINUS_SRC_ALPHA = 1 << 7
NVG_DST_ALPHA = 1 << 8
NVG_ONE_MINUS_DST_ALPHA = 1 << 9
NVG_SRC_ALPHA_SATURATE = 1 << 10
# NVGcompositeOperation
NVG_SOURCE_OVER = 0
NVG_SOURCE_IN = 1
NVG_SOURCE_OUT = 2
NVG_ATOP = 3
NVG_DESTINATION_OVER = 4
NVG_DESTINATION_IN = 5
NVG_DESTINATION_OUT = 6
NVG_DESTINATION_ATOP = 7
NVG_LIGHTER = 8
NVG_COPY = 9
NVG_XOR = 10
# NVGimageFlags
NVG_IMAGE_GENERATE_MIPMAPS = 1
NVG_IMAGE_REPEATX = 2
NVG_IMAGE_REPEATY = 4
NVG_IMAGE_FLIPY = 8
NVG_IMAGE_PREMULTIPLIED = 16
NVG_IMAGE_NEAREST = 32
# NVGcreateFlags
NVG_ANTIALIAS = 1
NVG_STENCIL_STROKES = 2
NVG_DEBUG = 4
# Python-NanoVG : A Python bindings of NanoVG
# Copyright (c) 2017 vaiorabbit
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
#
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
#
# 3. This notice may not be removed or altered from any source
# distribution.
| StarcoderdataPython |
11312783 | <reponame>rglaue/bakauditor
import os
from functools import lru_cache
from datetime import datetime
from types import SimpleNamespace
@lru_cache(maxsize=4096)
def get_zfs_snapshots(ssh=None):
    """Return a list of (snapshot_name, size) string pairs from
    `zfs list -p -t snapshot`, run locally or over ssh when *ssh* is given.

    The header line of the zfs output is skipped; malformed lines (fewer
    than two whitespace-separated fields) are ignored, best-effort.
    NOTE: lru_cache means repeated calls with the same *ssh* value return
    the first run's output for the life of the process.
    """
    ssh_cmd = '' if not ssh else 'ssh {} '.format(ssh)
    result = []
    with os.popen('{}zfs list -p -t snapshot'.format(ssh_cmd)) as p:
        for line in p.readlines()[1:]:
            # Explicit field check instead of the original bare `except: pass`,
            # which also swallowed KeyboardInterrupt/SystemExit.
            fields = line.split()
            if len(fields) >= 2:
                result.append((fields[0], fields[1]))
    return result
def check(**kwargs):
    """Report snapshot freshness for the ZFS filesystem kwargs['fs'].

    Scans the snapshots of that filesystem, parsing each snapshot's suffix
    (after '@') with the strptime format kwargs['time-fmt'].  Optional
    kwargs['ssh'] runs the listing remotely.

    Returns SimpleNamespace(ok, time, size, err) where `time` is the newest
    parseable snapshot timestamp (0 if none, ok=False then) and `size` is
    the size of the last matching snapshot whose name parsed.
    NOTE(review): `size` tracks the last snapshot seen, not necessarily the
    newest one -- confirm intended.
    """
    fs = kwargs['fs']
    t = 0
    size = None
    prefix = fs + '@'
    for snap_name, snap_size in get_zfs_snapshots(kwargs.get('ssh')):
        if not snap_name.startswith(prefix):
            continue
        try:
            t = max(
                t,
                datetime.strptime(snap_name.split('@')[1],
                                  kwargs.get('time-fmt')).timestamp())
        except (TypeError, ValueError):
            # name does not match the time format (or 'time-fmt' missing)
            continue
        try:
            size = int(snap_size)
        except (TypeError, ValueError):
            pass
    return SimpleNamespace(ok=t > 0, time=t, size=size, err=None)
| StarcoderdataPython |
5015 | <reponame>d53dave/python-crypto-licensecheck
import sys
from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
def sign_data(key, data, output_file):
    """Sign *data* (UTF-8 text) with the RSA private key stored at *key*
    and write the PKCS#1 v1.5 signature of its SHA-256 digest to
    *output_file* as raw bytes."""
    with open(key, 'r', encoding='utf-8') as key_file:
        private_key = RSA.importKey(key_file.read())
    digest = SHA256.new(data.encode('utf-8'))
    signature = pkcs1_15.new(private_key).sign(digest)
    with open(output_file, 'wb') as out:
        out.write(signature)
if __name__ == '__main__':
    # CLI usage: script.py <private_key_path> <data_string> <output_file>
    key_file = sys.argv[1]
    input_string = sys.argv[2]
    out_file = sys.argv[3]
    sign_data(key_file, input_string, out_file)
| StarcoderdataPython |
1651355 | from .application import ApplicationModel
from .using_application import UsingApplicationModel
| StarcoderdataPython |
188498 | <reponame>AquaDiva-INFRA1/ad-query-proxy
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 28 16:06:14 2020
@author: <NAME>
"""
from parsers.bibtex import parse
def test_parse() -> None:
    """Parse the sample BibTeX fixture and sanity-check every record."""
    source = "tests/resources/bibtex.bib"
    with open(source, encoding="utf-8") as data:
        for record in parse(data):
            assert "year" in record
            assert record["year"] in ("2019", "2020", "2021")
            assert "publisher" in record
            assert record["publisher"] == "Association for Lorem Ipsum"
            assert record["author"] == ["Lorem Ipsum", "Lörem Ipßüm"]
| StarcoderdataPython |
5018266 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from unittest import mock
from oslo_config import cfg
from oslo_utils import timeutils
from cinder.api import extensions
from cinder.api import microversions as mv
from cinder.api.openstack import api_version_request as api_version
from cinder.api.v3 import messages
from cinder import context
from cinder import exception
from cinder.message import api as message_api
from cinder.message import message_field
from cinder.tests.unit.api import fakes
import cinder.tests.unit.fake_constants as fake_constants
from cinder.tests.unit import test
from cinder.tests.unit import utils
CONF = cfg.CONF
version_header_name = 'OpenStack-API-Version'
class MessageApiTest(test.TestCase):
def setUp(self):
super(MessageApiTest, self).setUp()
self.message_api = message_api.API()
self.mock_object(self.message_api, 'db')
self.ctxt = context.RequestContext('admin', 'fakeproject', True)
self.ctxt.request_id = 'fakerequestid'
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.controller = messages.MessagesController(self.ext_mgr)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create(self, mock_utcnow):
CONF.set_override('message_ttl', 300)
mock_utcnow.return_value = datetime.datetime.utcnow()
expected_expires_at = timeutils.utcnow() + datetime.timedelta(
seconds=300)
expected_message_record = {
'project_id': 'fakeproject',
'request_id': 'fakerequestid',
'resource_type': 'fake_resource_type',
'resource_uuid': None,
'action_id': message_field.Action.SCHEDULE_ALLOCATE_VOLUME[0],
'detail_id': message_field.Detail.UNKNOWN_ERROR[0],
'message_level': 'ERROR',
'expires_at': expected_expires_at,
'event_id': "VOLUME_fake_resource_type_001_001",
}
self.message_api.create(self.ctxt,
message_field.Action.SCHEDULE_ALLOCATE_VOLUME,
detail=message_field.Detail.UNKNOWN_ERROR,
resource_type="fake_resource_type")
self.message_api.db.message_create.assert_called_once_with(
self.ctxt, expected_message_record)
mock_utcnow.assert_called_with()
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_with_minimum_args(self, mock_utcnow):
CONF.set_override('message_ttl', 300)
mock_utcnow.return_value = datetime.datetime.utcnow()
expected_expires_at = timeutils.utcnow() + datetime.timedelta(
seconds=300)
expected_message_record = {
'project_id': 'fakeproject',
'request_id': 'fakerequestid',
'resource_type': message_field.Resource.VOLUME,
'resource_uuid': None,
'action_id': message_field.Action.SCHEDULE_ALLOCATE_VOLUME[0],
'detail_id': message_field.Detail.UNKNOWN_ERROR[0],
'message_level': 'ERROR',
'expires_at': expected_expires_at,
'event_id': "VOLUME_VOLUME_001_001",
}
self.message_api.create(
self.ctxt,
action=message_field.Action.SCHEDULE_ALLOCATE_VOLUME)
self.message_api.db.message_create.assert_called_once_with(
self.ctxt, expected_message_record)
mock_utcnow.assert_called_with()
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_with_no_detail(self, mock_utcnow):
# Should get Detail.UNKNOWN_ERROR
CONF.set_override('message_ttl', 300)
mock_utcnow.return_value = datetime.datetime.utcnow()
expected_expires_at = timeutils.utcnow() + datetime.timedelta(
seconds=300)
expected_message_record = {
'project_id': 'fakeproject',
'request_id': 'fakerequestid',
'resource_type': 'fake_resource_type',
'resource_uuid': None,
'action_id': message_field.Action.SCHEDULE_ALLOCATE_VOLUME[0],
'detail_id': message_field.Detail.UNKNOWN_ERROR[0],
'message_level': 'ERROR',
'expires_at': expected_expires_at,
'event_id': "VOLUME_fake_resource_type_001_001",
}
self.message_api.create(
self.ctxt,
action=message_field.Action.SCHEDULE_ALLOCATE_VOLUME,
resource_type="fake_resource_type")
self.message_api.db.message_create.assert_called_once_with(
self.ctxt, expected_message_record)
mock_utcnow.assert_called_with()
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_with_detail_only(self, mock_utcnow):
CONF.set_override('message_ttl', 300)
mock_utcnow.return_value = datetime.datetime.utcnow()
expected_expires_at = timeutils.utcnow() + datetime.timedelta(
seconds=300)
expected_message_record = {
'project_id': 'fakeproject',
'request_id': 'fakerequestid',
'resource_type': 'fake_resource_type',
'resource_uuid': None,
'action_id': message_field.Action.SCHEDULE_ALLOCATE_VOLUME[0],
# this doesn't make sense for this Action, but that's the point
'detail_id': message_field.Detail.FAILED_TO_UPLOAD_VOLUME[0],
'message_level': 'ERROR',
'expires_at': expected_expires_at,
'event_id': "VOLUME_fake_resource_type_001_004",
}
self.message_api.create(
self.ctxt,
action=message_field.Action.SCHEDULE_ALLOCATE_VOLUME,
detail=message_field.Detail.FAILED_TO_UPLOAD_VOLUME,
resource_type="fake_resource_type")
self.message_api.db.message_create.assert_called_once_with(
self.ctxt, expected_message_record)
mock_utcnow.assert_called_with()
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_passed_exception_no_detail(self, mock_utcnow):
# Detail should be automatically supplied based on the
# message_field.Detail.EXCEPTION_DETAIL_MAPPINGS
CONF.set_override('message_ttl', 300)
mock_utcnow.return_value = datetime.datetime.utcnow()
expected_expires_at = timeutils.utcnow() + datetime.timedelta(
seconds=300)
expected_message_record = {
'project_id': 'fakeproject',
'request_id': 'fakerequestid',
'resource_type': 'fake_resource_type',
'resource_uuid': None,
'action_id': message_field.Action.SCHEDULE_ALLOCATE_VOLUME[0],
# this is determined by the exception we'll be passing
'detail_id': message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE[0],
'message_level': 'ERROR',
'expires_at': expected_expires_at,
'event_id': "VOLUME_fake_resource_type_001_007",
}
exc = exception.ImageTooBig(image_id='fake_image', reason='MYOB')
self.message_api.create(
self.ctxt,
action=message_field.Action.SCHEDULE_ALLOCATE_VOLUME,
exception=exc,
resource_type="fake_resource_type")
self.message_api.db.message_create.assert_called_once_with(
self.ctxt, expected_message_record)
mock_utcnow.assert_called_with()
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_passed_unmapped_exception_no_detail(self, mock_utcnow):
CONF.set_override('message_ttl', 300)
mock_utcnow.return_value = datetime.datetime.utcnow()
expected_expires_at = timeutils.utcnow() + datetime.timedelta(
seconds=300)
expected_message_record = {
'project_id': 'fakeproject',
'request_id': 'fakerequestid',
'resource_type': 'fake_resource_type',
'resource_uuid': None,
'action_id': message_field.Action.COPY_IMAGE_TO_VOLUME[0],
'detail_id': message_field.Detail.UNKNOWN_ERROR[0],
'message_level': 'ERROR',
'expires_at': expected_expires_at,
'event_id': "VOLUME_fake_resource_type_005_001",
}
exc = exception.ImageUnacceptable(image_id='fake_image', reason='MYOB')
self.message_api.create(
self.ctxt,
action=message_field.Action.COPY_IMAGE_TO_VOLUME,
exception=exc,
resource_type="fake_resource_type")
self.message_api.db.message_create.assert_called_once_with(
self.ctxt, expected_message_record)
mock_utcnow.assert_called_with()
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_passed_mapped_exception_and_detail(self, mock_utcnow):
# passed Detail should be ignored because this is a mapped exception
CONF.set_override('message_ttl', 300)
mock_utcnow.return_value = datetime.datetime.utcnow()
expected_expires_at = timeutils.utcnow() + datetime.timedelta(
seconds=300)
expected_message_record = {
'project_id': 'fakeproject',
'request_id': 'fakerequestid',
'resource_type': 'fake_resource_type',
'resource_uuid': None,
'action_id': message_field.Action.UPDATE_ATTACHMENT[0],
'detail_id': message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE[0],
'message_level': 'ERROR',
'expires_at': expected_expires_at,
'event_id': "VOLUME_fake_resource_type_004_007",
}
exc = exception.ImageTooBig(image_id='fake_image', reason='MYOB')
self.message_api.create(
self.ctxt,
action=message_field.Action.UPDATE_ATTACHMENT,
detail=message_field.Detail.VOLUME_ATTACH_MODE_INVALID,
exception=exc,
resource_type="fake_resource_type")
self.message_api.db.message_create.assert_called_once_with(
self.ctxt, expected_message_record)
mock_utcnow.assert_called_with()
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_passed_unmapped_exception_and_detail(self, mock_utcnow):
# passed Detail should be honored
CONF.set_override('message_ttl', 300)
mock_utcnow.return_value = datetime.datetime.utcnow()
expected_expires_at = timeutils.utcnow() + datetime.timedelta(
seconds=300)
expected_message_record = {
'project_id': 'fakeproject',
'request_id': 'fakerequestid',
'resource_type': 'fake_resource_type',
'resource_uuid': None,
'action_id': message_field.Action.UPDATE_ATTACHMENT[0],
'detail_id': message_field.Detail.VOLUME_ATTACH_MODE_INVALID[0],
'message_level': 'ERROR',
'expires_at': expected_expires_at,
'event_id': "VOLUME_fake_resource_type_004_005",
}
exc = ValueError('bogus error')
self.message_api.create(
self.ctxt,
action=message_field.Action.UPDATE_ATTACHMENT,
detail=message_field.Detail.VOLUME_ATTACH_MODE_INVALID,
exception=exc,
resource_type="fake_resource_type")
self.message_api.db.message_create.assert_called_once_with(
self.ctxt, expected_message_record)
mock_utcnow.assert_called_with()
def test_create_swallows_exception(self):
self.mock_object(self.message_api.db, 'create',
side_effect=Exception())
self.message_api.create(self.ctxt,
message_field.Action.ATTACH_VOLUME,
"fake_resource")
self.message_api.db.message_create.assert_called_once_with(
self.ctxt, mock.ANY)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_from_request_context(self, mock_utcnow):
CONF.set_override('message_ttl', 300)
mock_utcnow.return_value = datetime.datetime.utcnow()
expected_expires_at = timeutils.utcnow() + datetime.timedelta(
seconds=300)
self.ctxt.message_resource_id = 'fake-uuid'
self.ctxt.message_resource_type = 'fake_resource_type'
self.ctxt.message_action = message_field.Action.BACKUP_CREATE
expected_message_record = {
'project_id': 'fakeproject',
'request_id': 'fakerequestid',
'resource_type': 'fake_resource_type',
'resource_uuid': 'fake-uuid',
'action_id': message_field.Action.BACKUP_CREATE[0],
'detail_id': message_field.Detail.BACKUP_INVALID_STATE[0],
'message_level': 'ERROR',
'expires_at': expected_expires_at,
'event_id': "VOLUME_fake_resource_type_013_017",
}
self.message_api.create_from_request_context(
self.ctxt,
detail=message_field.Detail.BACKUP_INVALID_STATE)
self.message_api.db.message_create.assert_called_once_with(
self.ctxt, expected_message_record)
mock_utcnow.assert_called_with()
def test_get(self):
self.message_api.get(self.ctxt, 'fake_id')
self.message_api.db.message_get.assert_called_once_with(self.ctxt,
'fake_id')
def test_get_all(self):
self.message_api.get_all(self.ctxt)
self.message_api.db.message_get_all.assert_called_once_with(
self.ctxt, filters={}, limit=None, marker=None, offset=None,
sort_dirs=None, sort_keys=None)
def test_delete(self):
admin_context = mock.Mock()
self.mock_object(self.ctxt, 'elevated', return_value=admin_context)
self.message_api.delete(self.ctxt, 'fake_id')
self.message_api.db.message_destroy.assert_called_once_with(
admin_context, 'fake_id')
def test_cleanup_expired_messages(self):
admin_context = mock.Mock()
self.mock_object(self.ctxt, 'elevated', return_value=admin_context)
self.message_api.cleanup_expired_messages(self.ctxt)
self.message_api.db.cleanup_expired_messages.assert_called_once_with(
admin_context)
def create_message_for_tests(self):
"""Create messages to test pagination functionality"""
utils.create_message(
self.ctxt, action=message_field.Action.ATTACH_VOLUME)
utils.create_message(
self.ctxt, action=message_field.Action.SCHEDULE_ALLOCATE_VOLUME)
utils.create_message(
self.ctxt,
action=message_field.Action.COPY_VOLUME_TO_IMAGE)
utils.create_message(
self.ctxt,
action=message_field.Action.COPY_VOLUME_TO_IMAGE)
def test_get_all_messages_with_limit(self):
self.create_message_for_tests()
url = '/v3/messages?limit=1'
req = fakes.HTTPRequest.blank(url)
req.method = 'GET'
req.content_type = 'application/json'
req.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION)
req.api_version_request = mv.get_api_version(mv.RESOURCE_FILTER)
req.environ['cinder.context'].is_admin = True
res = self.controller.index(req)
self.assertEqual(1, len(res['messages']))
url = '/v3/messages?limit=3'
req = fakes.HTTPRequest.blank(url)
req.method = 'GET'
req.content_type = 'application/json'
req.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION)
req.api_version_request = mv.get_api_version(mv.RESOURCE_FILTER)
req.environ['cinder.context'].is_admin = True
res = self.controller.index(req)
self.assertEqual(3, len(res['messages']))
def test_get_all_messages_with_limit_wrong_version(self):
self.create_message_for_tests()
PRE_MESSAGES_PAGINATION = mv.get_prior_version(mv.MESSAGES_PAGINATION)
url = '/v3/messages?limit=1'
req = fakes.HTTPRequest.blank(url)
req.method = 'GET'
req.content_type = 'application/json'
req.headers = mv.get_mv_header(PRE_MESSAGES_PAGINATION)
req.api_version_request = mv.get_api_version(PRE_MESSAGES_PAGINATION)
req.environ['cinder.context'].is_admin = True
res = self.controller.index(req)
self.assertEqual(4, len(res['messages']))
def test_get_all_messages_with_offset(self):
self.create_message_for_tests()
url = '/v3/messages?offset=1'
req = fakes.HTTPRequest.blank(url)
req.method = 'GET'
req.content_type = 'application/json'
req.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION)
req.api_version_request = mv.get_api_version(mv.MESSAGES_PAGINATION)
req.environ['cinder.context'].is_admin = True
res = self.controller.index(req)
self.assertEqual(3, len(res['messages']))
def test_get_all_messages_with_limit_and_offset(self):
self.create_message_for_tests()
url = '/v3/messages?limit=2&offset=1'
req = fakes.HTTPRequest.blank(url)
req.method = 'GET'
req.content_type = 'application/json'
req.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION)
req.api_version_request = mv.get_api_version(mv.MESSAGES_PAGINATION)
req.environ['cinder.context'].is_admin = True
res = self.controller.index(req)
self.assertEqual(2, len(res['messages']))
def test_get_all_messages_with_filter(self):
self.create_message_for_tests()
url = '/v3/messages?action_id=%s' % (
message_field.Action.ATTACH_VOLUME[0])
req = fakes.HTTPRequest.blank(url)
req.method = 'GET'
req.content_type = 'application/json'
req.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION)
req.api_version_request = mv.get_api_version(mv.MESSAGES_PAGINATION)
req.environ['cinder.context'].is_admin = True
res = self.controller.index(req)
self.assertEqual(1, len(res['messages']))
def test_get_all_messages_with_sort(self):
    """Ascending sort on event_id orders the four seeded messages."""
    self.create_message_for_tests()
    request = fakes.HTTPRequest.blank('/v3/messages?sort=event_id:asc')
    request.method = 'GET'
    request.content_type = 'application/json'
    request.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION)
    request.api_version_request = mv.get_api_version(mv.MESSAGES_PAGINATION)
    request.environ['cinder.context'].is_admin = True
    body = self.controller.index(request)
    expected = sorted([
        "VOLUME_VOLUME_001_002",
        "VOLUME_VOLUME_002_002",
        "VOLUME_VOLUME_003_002",
        "VOLUME_VOLUME_003_002",
    ])
    self.assertEqual(4, len(body['messages']))
    # Compare every position instead of repeating four identical asserts.
    for position, event_id in enumerate(expected):
        self.assertEqual(event_id,
                         body['messages'][position]['event_id'])
def test_get_all_messages_paging(self):
    """Walk two pages of messages and verify the pagination links line up."""
    self.create_message_for_tests()
    # first request of this test
    url = '/v3/%s/messages?limit=2' % fake_constants.PROJECT_ID
    req = fakes.HTTPRequest.blank(url)
    req.method = 'GET'
    req.content_type = 'application/json'
    req.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION)
    req.api_version_request = mv.get_api_version(mv.RESOURCE_FILTER)
    req.environ['cinder.context'].is_admin = True
    res = self.controller.index(req)
    self.assertEqual(2, len(res['messages']))
    # The "next" link must carry the same limit plus a marker pointing
    # at the last message of this page.
    next_link = ('http://localhost/v3/%s/messages?limit='
                 '2&marker=%s') % (fake_constants.PROJECT_ID,
                                   res['messages'][1]['id'])
    self.assertEqual(next_link,
                     res['messages_links'][0]['href'])
    # Second request in this test
    # Test for second page using marker (res['messages][0]['id'])
    # values fetched in first request with limit 2 in this test
    url = '/v3/%s/messages?limit=1&marker=%s' % (
        fake_constants.PROJECT_ID, res['messages'][0]['id'])
    req = fakes.HTTPRequest.blank(url)
    req.method = 'GET'
    req.content_type = 'application/json'
    req.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION)
    req.api_version_request = api_version.max_api_version()
    req.environ['cinder.context'].is_admin = True
    result = self.controller.index(req)
    self.assertEqual(1, len(result['messages']))
    # checking second message of first request in this test with first
    # message of second request. (to test paging mechanism)
    self.assertEqual(res['messages'][1], result['messages'][0])
| StarcoderdataPython |
4956528 | # Generated by Django 3.1.8 on 2021-05-14 14:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``area_name`` text field to AdministrativeArea."""

    dependencies = [
        ('metadata', '0012_update_services'),
    ]

    operations = [
        migrations.AddField(
            model_name='administrativearea',
            name='area_name',
            # blank=True keeps the field optional in forms; no null=True,
            # so existing rows get the empty string as their value.
            field=models.CharField(blank=True, max_length=255),
        ),
    ]
| StarcoderdataPython |
1748832 | r"""
ECEI2D
=======
contains 2D version of synthetic Electron Cyclotron
Emission Imaging Diagnostic.
Unit Conventions
-----------------
In ECEI2D, Gaussian unit is used by default. The units for common quantities
are:
length:
centi-meter
time:
second
mass:
gram
magnetic field:
Gauss
temperature:
erg (we use energy unit for particle temperature)
Usage
------
Preparation
************
A complete ECEI2D run requires knowledge of the plasma, and the receivers.
The former should be provided as an instance of
:py:class:`ECEI_Profile<FPSDP.Plasma.PlasmaProfile.ECEI_Profile>`, and the
latter a list of
:py:class:`Detector2D<FPSDP.Diagnostics.ECEI.ECEI2D.Detector2D.Detector2D>`.
We will assume these two objects have been created and named `plasma2d` and
`detectors`.
First, we import the ECEImagingSystem class::
>>> from sdp.diagnostic.ecei.ecei2d import ECEImagingSystem
Then, we initialize the ECEI with plasma2d and detectors::
>>> ecei = ECEImagingSystem(plasma2d, detectors)
Note that some additional parameters can be provided while initialization,
check the doc-string in :py:class:`ECEImagingSystem
<FPSDP.Diagnostics.ECEI.ECEI2D.Imaging.ECEImagingSystem>` for a detailed list
of these parameters.
The next step is to setup the calculation area. ECEI uses 3D Cartesian
coordinates, and assumes rectangular cells. So, three 1D arrays specifying
grids along Z(local toroidal), Y(vertical), and X(Radial) directions is needed.
The detector is always assumed being on the low-field side, and in vacuum. The
calculation area needs to include part of the vacuum region, and large enough
to include all the resonant region. X1D mesh also determines the calculation
start and end points, so it's normally from larger X (vacuum region outside of
plasma) to smaller X (inner plasma).
Let's say we choose a uniform XYZ grid, we can create it using
:py:mod:`numpy <numpy>` as::
>>> X1D = numpy.linspace(251, 216, 160)
>>> Y1D = numpy.linspace(-30, 30, 65)
>>> Z1D = numpy.linspace(-30, 30, 65)
and set ECEI calculation area::
>>> ecei.set_coords([Z1D, Y1D, X1D])
It is possible that different detectors need different initial mesh. This is
particularly important if these channels have very different resonance
locations. In this case, we can specify mesh for chosen channels only.
For example, we can set channel 0, and channel 3 only::
>>> ecei.set_coords([Z1D, Y1D, X1D], channelID=[0, 3])
Note that channelID is numbered from 0.
It is recommended to run the automatic mesh adjustment before diagnosing::
>>> ecei.auto_adjust_mesh(fine_coeff = 1)
This function run diagnose on the preset mesh and optimize its X grids by
making mesh fine within resonance region, and coarse elsewhere. The fine_coeff
is a parameter controlling the mesh size. The larger this parameter, the finer
resulting mesh overall.
Diagnose
*********
We can now run ECEI and observe the result::
>>> ecei.diagnose(time=[0, 1, 2])
Running diagnose() without a `time` argument will diagnose the equilibrium
plasma. And a given `time` list will result in a series of diagnosis on
perturbed plasma at corresponding time snaps.
The measured electron temperature is stored in `Te` attribute.
>>> ecei.Te
array([[ 1.47142490e-08, 1.46694915e-08, 1.46748651e-08],
[ 1.56084333e-08, 1.51977835e-08, 1.48657565e-08],
[ 1.69261271e-08, 1.65879854e-08, 1.61561885e-08],
[ 1.58508369e-08, 1.63720864e-08, 1.68176195e-08],
[ 1.46057450e-08, 1.47844442e-08, 1.50868828e-08],
[ 1.45398116e-08, 1.45283573e-08, 1.45292955e-08],
[ 1.49914189e-08, 1.48120112e-08, 1.47148505e-08],
[ 1.65238937e-08, 1.60221945e-08, 1.55572079e-08]])
>>> ecei.Te.shape
(8L, 3L)
The first dimension of Te corresponds to the detectors, and the second
dimension for time.
It is OK to run diagnose multiple times with different parameters, but the
result will be overwritten.
Post analysis
**************
ECEImagingSystem provides additional information about the diagnosing process.
The most useful one is `view_spots`. This attribute stores a list of detailed
emission spot information for each channel in the most recent time snap.
>>> vs = ecei.view_spots
>>> len(vs)
8
>>> vs[0].shape
(65L, 63L)
The shape of each view_spot is [NY, NX], it contains the instrumental function
on the 2D plane, with the largest point normalized to 1. This means the
measured Te is just a weighted average of the Te on the 2D plane under this
weighting function.
More information can be obtained from `channels` attribute, which is literally
the ECE2D objects that carry out the diagnostic.
The propagation and absorption of the probing waves can be found in
`propagator` attribute in each channel.
Modules
--------
CurrentCorrelationTensor:
Contains classes for calculating current correlation tensor. Mainly
includes non-relativistic and relativistic versions.
Detector2D:
Contains Detector class for ECEI2D. Now it has GaussianAntenna type
detector.
Reciprocity:
Main module carrying out 2D ECE calculation. ECE2D class is the main
class.
Imaging:
Contains multi-channel ECE Imaging class. ECEImagingSystem is the main
class.
"""
from .imaging import ECEImagingSystem
from .ece import ECE2D
from .detector2d import GaussianAntenna
| StarcoderdataPython |
1644581 | from __future__ import annotations
from abc import abstractmethod
from typing import TYPE_CHECKING, Callable, Iterable, List, Optional, Tuple, Union
from open_mafia_engine.core.enums import ActionResolutionType
from open_mafia_engine.core.event_system import (
Action,
EPostAction,
EPreAction,
Event,
Subscriber,
handler,
)
from open_mafia_engine.core.game_object import GameObject, converter
if TYPE_CHECKING:
from open_mafia_engine.core.game import Game
class Phase(GameObject):
    """Represents a monolithic "phase" of action.

    Attributes
    ----------
    name : str
        The current phase name.
    action_resolution : ActionResolutionType
        One of {"instant", "end_of_phase"}
    """

    def __init__(
        self,
        game,
        /,
        name: str,
        action_resolution: str = "instant",
    ):
        super().__init__(game)
        self._name = name
        # Coerce the string (e.g. "instant") into the enum immediately.
        self._action_resolution = ActionResolutionType(action_resolution)

    @property
    def name(self) -> str:
        """The phase's name (read-only)."""
        return self._name

    @property
    def action_resolution(self) -> ActionResolutionType:
        """How actions are resolved while this phase is active."""
        return self._action_resolution

    @action_resolution.setter
    def action_resolution(self, v: str):
        # Setter also accepts plain strings and coerces them to the enum.
        self._action_resolution = ActionResolutionType(v)

    def __eq__(self, o: object) -> bool:
        # Two phases are equal when both name and resolution mode match.
        if not isinstance(o, Phase):
            return NotImplemented
        return (o.name == self.name) and (o.action_resolution == self.action_resolution)
class ETryPhaseChange(Event):
    """Try to change the phase.

    Parameters
    ----------
    new_phase : Phase or None
        Target phase; None means "advance to the next phase". Non-Phase
        values (e.g. phase names) are coerced via the game converter.
    """

    def __init__(self, game, /, new_phase: Optional[Phase] = None):
        if not (new_phase is None or isinstance(new_phase, Phase)):
            # Allow callers to pass e.g. a phase name; convert it to a Phase.
            new_phase = converter.convert(game, Phase, new_phase)
        self._new_phase = new_phase
        super().__init__(game)

    @property
    def new_phase(self) -> Optional[Phase]:
        """Requested target phase, or None for "next"."""
        return self._new_phase
class EPrePhaseChange(EPreAction):
    """Phase is about to change."""

    @property
    def action(self) -> PhaseChangeAction:
        # Narrowed annotation: this event is only emitted for
        # PhaseChangeAction instances.
        return self._action

    @property
    def new_phase(self) -> Optional[Phase]:
        """Target phase of the pending change (None means "next")."""
        return self.action.new_phase

    @property
    def old_phase(self) -> Phase:
        """Phase that was current when the change action was created."""
        return self.action.old_phase
class EPostPhaseChange(EPostAction):
    """Phase has changed."""

    @property
    def action(self) -> PhaseChangeAction:
        return self._action

    @property
    def new_phase(self) -> Phase:
        """The phase that is now current.

        Falls back to ``game.current_phase`` when the action requested
        "next phase" (its new_phase was None).
        """
        np = self.action.new_phase
        if np is None:
            np = self.game.current_phase
        return np

    @property
    def old_phase(self) -> Phase:
        """Phase that was current before the change."""
        return self.action.old_phase
class PhaseChangeAction(Action):
    """Action to change the phase.

    Parameters
    ----------
    new_phase : None or Phase
        The resulting phase. By default, `None` uses the next phase.
    old_phase : Phase
        The phase that this action was created in.
    """

    def __init__(
        self,
        game: Game,
        source: GameObject,
        /,
        new_phase: Optional[Phase] = None,
        *,
        priority: float = 0.0,
        canceled: bool = False,
    ):
        super().__init__(game, source, priority=priority, canceled=canceled)
        self.new_phase = new_phase
        # Snapshot the phase at creation time so pre/post events can
        # report what the game changed *from*.
        self._old_phase = self.game.current_phase

    @property
    def old_phase(self) -> Phase:
        """The phase that was current when this action was created."""
        return self._old_phase

    def doit(self):
        """Apply the change: bump to the next phase, or jump to new_phase."""
        if self.new_phase is None:
            self.game.phase_system.bump_phase()
        else:
            self.game.phase_system.current_phase = self.new_phase

    # Event classes emitted before/after this action resolves.
    Pre = EPrePhaseChange
    Post = EPostPhaseChange
class AbstractPhaseSystem(Subscriber):
    """Interface for a phase system.

    It's possible to see all phases by using `game.phase_system.possible_phases`
    """

    def __init__(self, game: Game, /, *, use_default_constraints: bool = True):
        super().__init__(game, use_default_constraints=use_default_constraints)
        # Built-in bracketing phases present in every phase system.
        self._startup = Phase(game, name="startup", action_resolution="instant")
        self._shutdown = Phase(game, name="shutdown", action_resolution="instant")

    @property
    def startup(self) -> Phase:
        """The phase before the game proper begins."""
        return self._startup

    @property
    def shutdown(self) -> Phase:
        """The terminal phase after the game ends."""
        return self._shutdown

    @property
    @abstractmethod
    def possible_phases(self) -> Iterable[Phase]:
        """Returns all possible phases (as a new iterable).

        If it is infinite, override __getitem__ as well!
        """

    def __getitem__(self, key: str) -> Phase:
        """Returns the phase with the given name.

        By default, iterates over all possible phases.
        """
        if not isinstance(key, str):
            raise TypeError(f"Expected key as str, got {key!r}")
        for p in self.possible_phases:
            if key == p.name:
                return p
        raise KeyError(key)

    @property
    @abstractmethod
    def current_phase(self) -> Phase:
        """Returns the current phase."""

    @current_phase.setter
    @abstractmethod
    def current_phase(self, v: Phase):
        """Sets the current phase."""

    @abstractmethod
    def bump_phase(self) -> Phase:
        """Updates the phase to use the next one, then returns the current one."""

    @classmethod
    def gen(cls, *args, **kwargs) -> Callable[[Game], AbstractPhaseSystem]:
        """Create a callable that generates a phase cycle."""

        def func_gen(game: Game) -> AbstractPhaseSystem:
            return cls(game, *args, **kwargs)

        return func_gen

    @handler
    def system_phase_change(
        self, event: ETryPhaseChange
    ) -> Optional[List[PhaseChangeAction]]:
        """Some external system asked for a phase change."""
        if not isinstance(event, ETryPhaseChange):
            return
        return [PhaseChangeAction(self.game, self, event.new_phase)]
class SimplePhaseCycle(AbstractPhaseSystem):
    """Simple phase cycle definition.

    Parameters
    ----------
    game : Game
    cycle : None or List[Tuple[str, ActionResolutionType]]
        The cycle definition. Submit pairs of (name, resolution_type).
        By default, uses `[("day", "instant"), ("night", "end_of_phase")]`
    """

    # Sentinel indices for the bracketing phases; non-negative values
    # index (modulo the cycle length) into the repeating cycle.
    _STARTUP = -1
    _SHUTDOWN = -2

    def __init__(
        self,
        game: Game,
        /,
        cycle: List[Tuple[str, ActionResolutionType]] = None,
        current_phase: Optional[str] = None,
    ):
        super().__init__(game)
        if cycle is None:
            cycle = [("day", "instant"), ("night", "end_of_phase")]
        cphases = []
        names = set()
        for name, ar in cycle:
            # Names must be unique so lookup by name is unambiguous.
            if name in names:
                raise ValueError(f"Duplicate name {name!r} in {names}")
            ar = ActionResolutionType(ar)
            names.add(name)
            cphases.append(Phase(game, name=name, action_resolution=ar))
        self._cycle = cphases
        self._i = self._STARTUP
        if current_phase is not None:
            self.current_phase = current_phase

    @property
    def cycle(self) -> List[Phase]:
        # Return a copy so callers cannot mutate the internal list.
        return list(self._cycle)

    @property
    def possible_phases(self) -> List[Phase]:
        return [self.startup, *self.cycle, self.shutdown]

    @property
    def current_phase(self) -> Phase:
        """Returns the current phase."""
        if self._i == self._STARTUP:
            return self.startup
        elif self._i == self._SHUTDOWN:
            return self.shutdown
        i = self._i % len(self._cycle)
        return self._cycle[i]

    @current_phase.setter
    def current_phase(self, v: Union[str, Phase]):
        if isinstance(v, str):
            new_phase = self[v]
        else:
            new_phase = v
        if new_phase == self.startup:
            # Maybe disallow going back to startup?
            self._i = self._STARTUP
        elif new_phase == self.shutdown:
            self._i = self._SHUTDOWN
        elif new_phase in self.possible_phases:
            # Just move through all phases implicitly - we won't trigger anything
            while self.current_phase != new_phase:
                self._i += 1
        else:
            raise ValueError(f"No such phase found: {v!r}")

    def bump_phase(self) -> Phase:
        """Updates the phase to use the next one, then returns the current one.

        Trying to bump on `shutdown` phase will be ignored.
        """
        if self._i == self._STARTUP:
            self._i = 0
            return self.current_phase
        elif self._i == self._SHUTDOWN:
            return self.current_phase
            # raise ValueError(f"Cannot bump shutdown phase: {self.shutdown}")
        self._i += 1
        return self.current_phase

    @classmethod
    def gen(
        cls,
        cycle: List[Tuple[str, ActionResolutionType]] = None,
        current_phase: Optional[str] = None,
    ) -> Callable[[Game], SimplePhaseCycle]:
        """Generator for a simple phase cycle."""
        return super().gen(cycle=cycle, current_phase=current_phase)
| StarcoderdataPython |
4989166 | import argparse
import os
from liquidcss.workspace import WorkSpace
from liquidcss.settings import Settings, Messages, DocConfig
from liquidcss.utils import create_file_key, display_output
"""
Command: liquidcss status
Description:
Displays information about the files registered with the WorkSpace.
Positional Arguments:{id}
{id} - ID of the file.
Flags:
[-a --all] : Designates all files.
"""
# Module-level singletons shared by status() and main().
workspace = WorkSpace(base_dir = os.getcwd())
settings = Settings(workspace = workspace)
def status(ids):
    """Build the console output lines describing each registered file id.

    Returns the accumulated status lines for *ids*; statuses are rendered
    from each file's DocConfig via ``Messages.status``.
    """
    to_console = []
    for id_ in ids:
        doc_config = workspace.file_map.settings_from_id(id_ = id_, file_settings = DocConfig)
        if not doc_config:
            # NOTE(review): this aborts at the first unregistered id and
            # drops statuses of any later ids — confirm this is intended.
            return [*to_console, Messages.id_not_registered]
        to_console.append(Messages.status.format(**doc_config.values))
    return to_console
def main(args):
    """Entry point for ``liquid status``.

    Parses *args*, registers them with the module-level Settings, then
    prints the status of either all registered files (``--all``) or the
    single file whose positional id was given.
    """
    parser = argparse.ArgumentParser(
        prog="liquid status",
        description="Displays information about the files registered with the WorkSpace.",
    )
    group = parser.add_mutually_exclusive_group(required = True)
    group.add_argument(
        'id',
        nargs = "?",
        help = "ID of the file."
    )
    group.add_argument(
        "--all", "-a",
        action = "store_true",
        help="Designates all files.",
    )
    parsed_args = parser.parse_args(args)
    settings.register_from_kwargs(**vars(parsed_args))
    if settings.all:
        ids = tuple(
            dict_['id'] for dict_ in workspace.file_map.content.values()
        )
    else:
        # BUG FIX: the previous code did ``tuple(parsed_args.id, )``, which
        # iterates the id *string* and yields a tuple of single characters
        # (and would also be a TypeError with the stray second argument).
        # A one-element tuple is what status() expects.
        ids = (parsed_args.id,)
    to_console = status(ids = ids)
    display_output(to_console)
| StarcoderdataPython |
5020885 | <gh_stars>0
import datetime
from django.db import models
from django.utils import timezone
from django.urls import reverse
from autoslug import AutoSlugField
class Donor(models.Model):
    """A funding organisation, identified by full name and abbreviation."""

    name = models.CharField(max_length=200, unique=True)
    abbrev = models.CharField(max_length=20, unique=True)

    def __str__(self):
        # Display the short form in admin lists and dropdowns.
        return self.abbrev

    class Meta:
        ordering = ('name',)
class Theme(models.Model):
    """A thematic area a call for proposals can be tagged with."""

    theme = models.CharField(unique=True, max_length=50)

    def __str__(self):
        return self.theme

    class Meta:
        ordering = ('theme',)
class Zone(models.Model):
    """A geographic zone a call for proposals can target."""

    zone = models.CharField(unique=True, max_length=200)

    def __str__(self):
        return self.zone

    class Meta:
        ordering = ('zone',)
class Cfp(models.Model):
    """A call for proposals (CFP) published by a donor."""

    CURRENCY = (
        ('USD', 'US Dollars'),
        ('GBP', 'British Pound'),
        ('EUR', 'Euros'),
        ('KES', 'Kenya Shillings'),
        ('JPY', 'Japanese Yen'),
        ('CAD', 'Canadian Dollars'),
    )

    entered_at = models.DateTimeField(auto_now_add=True, editable=False)
    donor = models.ForeignKey(Donor, on_delete=models.CASCADE)
    title = models.CharField(
        max_length=200, unique=True, verbose_name='Call for proposals title')
    slug = AutoSlugField(max_length=255, null=True, editable=True, unique=True, populate_from='title')
    link = models.URLField(verbose_name='Call for proposals website')
    pub_date = models.DateField(verbose_name='Published')
    closing_date_provided = models.BooleanField(
        verbose_name='Closing date specified?')
    # Nullable: a CFP may be published without an application deadline.
    closing_date = models.DateField(
        null=True, blank=True, verbose_name='Closing date for applications')
    themes = models.ManyToManyField(Theme)
    zones = models.ManyToManyField(Zone)
    type_of_projects = models.TextField()
    # eligibility = models.TextField(verbose_name='Eligibility Criteria')
    funding_currency = models.CharField(max_length=3, choices=CURRENCY)
    grant_size_specified = models.BooleanField(
        verbose_name='Has the grant size been specified?')
    overall_budget_specified = models.BooleanField(
        verbose_name='Has the overall budget been specified?')
    overall_budget = models.FloatField(
        null=True, blank=True, verbose_name='Total or overall budget available')
    minimum_budget = models.FloatField(
        null=True, blank=True, verbose_name='Minimum budget for a project')
    maximum_budget = models.FloatField(
        null=True, blank=True, verbose_name='Maximum budget for a project')
    duration_specified = models.BooleanField(
        verbose_name='Project duration specified?')
    duration = models.PositiveIntegerField(
        null=True, blank=True, verbose_name='Maximum duration(in months) for a project')
    # how_to_apply = models.TextField()
    apply_here = models.URLField(blank=True)
    notes = models.TextField(blank=True)

    def get_absolute_url(self):
        """Canonical detail URL for this CFP, keyed by slug."""
        return reverse('cfp:cfp_detail', args=[str(self.slug)])

    def __str__(self):
        return self.title

    def past_deadline(self):
        """Return True when today is past the closing date.

        BUG FIX: previously this compared ``today > self.closing_date``
        unconditionally and raised TypeError when ``closing_date`` is NULL
        (the field allows null=True). A missing closing date now counts as
        "not past deadline".
        """
        if self.closing_date is None:
            return False
        return datetime.date.today() > self.closing_date

    def no_closing_date(self):
        """Return True when no closing date has been recorded."""
        return self.closing_date is None

    # NOTE(review): naming the default manager ``cfp`` (not ``objects``)
    # means ``Cfp.objects`` does not exist; queries must use ``Cfp.cfp``.
    cfp = models.Manager()
| StarcoderdataPython |
3276642 | from __future__ import print_function, division
from dials.array_family import flex
from xfel.merging.application.worker import worker
from xfel.merging.application.reflection_table_utils import reflection_table_utils
try:
    import resource
    import platform

    def get_memory_usage():
        """Return a human-readable string with this process's peak RSS."""
        # getrusage returns kb on linux, bytes on mac
        units_per_mb = 1024
        if platform.system() == "Darwin":
            units_per_mb = 1024*1024
        return ('Memory usage: %.1f MB' % (int(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / units_per_mb))
except ImportError:
    # BUG FIX: the fallback used to define debug_memory_usage(), leaving
    # get_memory_usage undefined (a NameError at call time) on platforms
    # without the resource module. Define the real name; keep the old
    # misnamed stub for backward compatibility.
    def get_memory_usage():
        """Fallback when the resource module is unavailable."""
        return 'Memory usage: unknown'

    def debug_memory_usage():
        pass
class merger(worker):
    """
    Merges multiple measurements of symmetry-reduced hkl's.
    """

    def __repr__(self):
        return "Merge multiple measurements of symmetry-reduced hkl's"

    def merging_reflection_table(self):
        '''Create a reflection table for storing merged hkl's'''
        table = flex.reflection_table()
        table['miller_index'] = flex.miller_index()
        table['intensity'] = flex.double()
        table['esd'] = flex.double()
        table['rmsd'] = flex.double()
        table['multiplicity'] = flex.int()
        return table

    def calc_reflection_intensity_stats(self, reflections):
        '''Calculate intensity statistics for reflection table'''
        multiplicity = len(reflections)
        assert multiplicity != 0
        stats = flex.mean_and_variance(reflections['intensity.sum.value'])
        # Propagate the per-measurement variances into a single esd.
        propagated_esd = (flex.sum(reflections['intensity.sum.variance']) ** 0.5)/ multiplicity
        # Sample standard deviation is undefined for a single measurement.
        rmsd = 0.0
        if multiplicity > 1:
            rmsd = stats.unweighted_sample_standard_deviation()
        return {'intensity' : stats.mean(),
                'esd' : propagated_esd,
                'rmsd' : rmsd,
                'multiplicity' : multiplicity}

    def output_merged_reflections(self, reflections):
        """Write merged hkl rows as ASCII to <output_dir>/merge.out."""
        merged_reflections_file_path = self.params.output.output_dir + '/merge.out'
        merged_file = open(merged_reflections_file_path, 'w')
        for ref in reflections:
            merged_file.write("%s %f %f %f %d\n"%(ref.get('miller_index'), ref.get('intensity'), ref.get('esd'), ref.get('rmsd'), ref.get('multiplicity')))
        merged_file.close()

    def run(self, experiments, reflections):
        # merge reflection intensities: calculate the average and other statistics
        self.logger.log_step_time("AVERAGE")
        self.logger.log("Averaging intensities...")
        all_rank_merged_reflections = self.merging_reflection_table()
        if len(reflections) > 0:
            for hkl_reflection_table in reflection_table_utils.get_next_hkl_reflection_table(reflections):
                intensity_stats = self.calc_reflection_intensity_stats(reflections=hkl_reflection_table)
                intensity_stats['miller_index'] = hkl_reflection_table[0].get('miller_index_asymmetric')
                all_rank_merged_reflections.append(intensity_stats)
        self.logger.log("Merged intensities for %d HKLs"%(all_rank_merged_reflections.size()))
        self.logger.log_step_time("AVERAGE", True)
        # gather all merged intensities at rank 0
        self.logger.log_step_time("GATHER")
        if self.mpi_helper.rank != 0:
            self.logger.log("Executing MPI gathering of all reflection tables at rank 0...")
        # NOTE: gather is a collective call — every rank must execute it.
        all_merged_reflection_tables = self.mpi_helper.comm.gather(all_rank_merged_reflections, root = 0)
        self.logger.log_step_time("GATHER", True)
        # rank 0: concatenate all merged intensities into the final table
        if self.mpi_helper.rank == 0:
            self.logger.log_step_time("MERGE")
            final_merged_reflection_table = self.merging_reflection_table()
            self.logger.log("Performing final merging of reflection tables received from all ranks...")
            for table in all_merged_reflection_tables:
                final_merged_reflection_table.extend(table)
            self.logger.main_log("Total merged HKLs: {}".format(final_merged_reflection_table.size()))
            self.logger.log_step_time("MERGE", True)
            # write the final merged reflection table out to an ASCII file
            self.logger.log_step_time("WRITE")
            self.output_merged_reflections(final_merged_reflection_table)
            self.logger.log_step_time("WRITE", True)
        return None, None
| StarcoderdataPython |
6638999 | """GetAsyncRequest message tests."""
from pyof.v0x04.controller2switch.get_async_request import GetAsyncRequest
from tests.unit.test_struct import TestStruct
class TestGetAsyncRequest(TestStruct):
    """Test the GetAsyncRequest message."""

    @classmethod
    def setUpClass(cls):
        """Configure raw file and its object in parent class (TestDump)."""
        super().setUpClass()
        super().set_raw_dump_file('v0x04', 'ofpt_get_async_request')
        super().set_raw_dump_object(GetAsyncRequest, xid=3)
        # OFPT_GET_ASYNC_REQUEST is a bare OpenFlow header: 8 bytes.
        super().set_minimum_size(8)
| StarcoderdataPython |
26484 | from pyexocross.hitran.hitran import HITRANLinelist
from pyexocross.pyexocross import PyExocross
from pyexocross.exomol.exomolbroads import ExomolBroadener
import numpy as np
from pyexocross.util import create_grid_res, convert_to_wavenumber
from pyexocross.writer.hdf5writer import HDF5Writer
import matplotlib.pyplot as plt
# Wavenumber grid (cm^-1): an R=15000 wavelength grid from 1.1 to 2.0 um,
# converted via 10000/lambda and reversed so the grid is ascending.
wngrid = 10000/create_grid_res(15000,1.1,2.0)[::-1,0]
#hl_h2o = HITRANLinelist('/Users/ahmed/Documents/molecular_data/HITRAN/H2O/H2O.par')
# NOTE(review): the variable is named hl_h2o but loads the CH4 .par file,
# and the broadening files below are 1H2-16O (water) tables from the CH4
# directory — confirm the intended molecule/file pairing.
hl_h2o= HITRANLinelist('/Users/ahmed/Documents/molecular_data/HITRAN/CH4/CH4.par')
#hl = HITRANLinelist('/Users/ahmed/Documents/molecular_data/HITRAN/CO2/12C16O2.par')
h2_h2o = ExomolBroadener(0.0209,0.027,filename='/Users/ahmed/Documents/molecular_data/HITRAN/CH4/1H2-16O__H2.broad',species='H2')
he_h2o = ExomolBroadener(0.0042,0.20,filename='/Users/ahmed/Documents/molecular_data/HITRAN/CH4/1H2-16O__He.broad',species='He')
# Mix broadeners roughly like a solar-composition H2/He atmosphere,
# plus 10% self-broadening.
hl_h2o.add_broadener(h2_h2o,ratio=0.704)
hl_h2o.add_broadener(he_h2o,ratio=0.121)
hl_h2o.add_self_broadener(ratio=0.1)
# h2_ch4 = ExomolBroadener(0.0603,0.5,filename='/Users/ahmed/Documents/molecular_data/HITRAN/CH4/12C-1H4__H2.broad',species='H2')
# he_ch4 = ExomolBroadener(0.0382,0.30,filename='/Users/ahmed/Documents/molecular_data/HITRAN/CH4/12C-1H4__He.broad',species='He')
# hl_ch4.add_broadener(h2_ch4,ratio=0.83)
# hl_ch4.add_broadener(he_ch4,ratio=0.17)
pyexo_h2o = PyExocross(hl_h2o)
#pyexo_ch4 = PyExocross(hl_ch4)
# Temperature (K) and pressure used for the cross-section computation.
t = 200
p = 1.0
if __name__ == "__main__":
    # Cross section including self-broadening (ratio 0.1 set above).
    wn_h2o_self,xsec_h2o_self = pyexo_h2o.compute_xsec_parallel(wngrid,t,p, chunksize=1000, threshold=0.0, wing_cutoff=25.0,max_workers=2)
    # Effectively disable self-broadening, then recompute for comparison.
    hl_h2o.set_broadener_ratio('self',ratio=1e-10)
    wn_h2o,xsec_h2o = pyexo_h2o.compute_xsec_parallel(wngrid,t,p, chunksize=1000, threshold=0.0, wing_cutoff=25.0,max_workers=2)
    #wn_ch4,xsec_ch4 = pyexo_ch4.compute_xsec(wngrid,t,p, chunksize=100, threshold=0.0, wing_cutoff=25.0)
    # Plot both cross sections on a log scale for visual comparison.
    plt.figure()
    # plt.plot(wn,xsec,label='pyexo')
    plt.plot(wn_h2o_self,xsec_h2o_self,label='H2O self')
    plt.plot(wn_h2o,xsec_h2o,label='H2O')
    #plt.plot(10000/wn_ch4,xsec_ch4,label='CH4')
    plt.xlabel(r'Wavelength um')
    plt.ylabel(r'Cross-section cm$^{2}$/molecule')
    plt.yscale('log')
    plt.legend()
    plt.show()
| StarcoderdataPython |
3243410 | import os
import dbl
import discord
from discord.ext import commands, tasks
from ansura import AnsuraBot
class DBL(commands.Cog):
    """Cog that connects the bot to the Discord Bot List (top.gg) API."""

    def __init__(self, bot: AnsuraBot):
        self.bot = bot
        # top.gg API token is read from the DBL environment variable.
        self.token = os.getenv("DBL")
        # autopost=True makes the client post guild counts periodically.
        self.dblpy = dbl.DBLClient(self.bot, self.token, autopost=True)

    @tasks.loop(seconds=600)
    async def update_status(self):
        """Refresh the bot presence with the current guild count.

        NOTE(review): no ``update_status.start()`` call is visible here,
        so this loop appears never to run — confirm it is started elsewhere.
        """
        await self.bot.change_presence(
            status=discord.Status.online,
            activity=discord.Activity(
                name=str(len(self.bot.guilds)) + " servers | %help",
                type=discord.ActivityType.watching
            )
        )
def setup(bot):
    """Extension entry point used by discord.py's load_extension."""
    bot.add_cog(DBL(bot))
| StarcoderdataPython |
4911336 | <filename>select_market.py
""" Market selection module """
import pymysql.cursors
from app_head import get_head
from app_body import get_body
from app_page import set_page
from app_ogp import set_ogp
from app_metatags import get_metatags
from app_title import get_title
from bootstrap import get_bootstrap
from app_loading import get_loading_head, get_loading_body
from app_stylesheet import get_stylesheet
from app_navbar import navbar
from font_awesome import get_font_awesome
from app_cookie import get_sa_theme, user_get_uid
from googleanalytics import get_googleanalytics
from sa_db import sa_db_access
# Database credentials, resolved once at import time and shared by all
# query helpers in this module.
ACCESS_OBJ = sa_db_access()
DB_USR = ACCESS_OBJ.username()
DB_PWD = ACCESS_OBJ.password()
DB_NAME = ACCESS_OBJ.db_name()
DB_SRV = ACCESS_OBJ.db_server()
def save_selectmarket(burl, sel):
    """Persist *sel* as the current user's default profile and return a
    meta-refresh page redirecting to the portfolio generator.

    BUG FIXES: the ``<PASSWORD>`` redaction placeholder (invalid syntax)
    is replaced with the DB_PWD module constant, and the UPDATE statement
    is parameterized instead of string-interpolating *sel* and the user
    id (SQL injection risk).
    """
    return_data = set_page(get_head('<meta http-equiv="refresh" content="0;URL=' +
                                    burl + 'genportf/?acm=' +
                                    str(sel) + '&step=1&notstart=0" />') + get_body('', '', ''))
    user_id = user_get_uid()
    connection = pymysql.connect(host=DB_SRV,
                                 user=DB_USR,
                                 password=DB_PWD,
                                 db=DB_NAME,
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    try:
        cursor = connection.cursor(pymysql.cursors.SSCursor)
        # Placeholders let the driver escape user-controlled values.
        sql = "UPDATE users SET default_profile=%s WHERE uid=%s"
        cursor.execute(sql, (str(sel), str(user_id)))
        connection.commit()
        cursor.close()
    finally:
        # Always release the connection, even if the UPDATE fails.
        connection.close()
    return return_data
def get_market_list(burl):
    """Build the HTML list-group of asset classes and markets.

    Each visible entry links to ``n/?step=d&x=<id>``; entries whose label
    is left as the sentinel 'x' are suppressed.

    BUG FIX: the ``<PASSWORD>`` redaction placeholder (invalid syntax) is
    replaced with the DB_PWD module constant so the connection can
    authenticate.
    """
    return_data = ''
    connection = pymysql.connect(host=DB_SRV,
                                 user=DB_USR,
                                 password=DB_PWD,
                                 db=DB_NAME,
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    cursor = connection.cursor(pymysql.cursors.SSCursor)
    sql = "SELECT asset_class_id, asset_class_name FROM asset_class ORDER BY asset_class_id"
    cursor.execute(sql)
    res = cursor.fetchall()
    label = 'x'
    return_data = '<div class="list-group">'
    for row in res:
        asset_class_id = row[0]
        asset_class_name = row[1]
        label = asset_class_name
        # handle particularities: 'x' marks classes that must not be listed
        if asset_class_id == 'EQ:':
            label = 'All stocks'
        if asset_class_id == 'BD:':
            label = 'x'
        if asset_class_id == 'MA:':
            label = 'x'
        if asset_class_id == 'CO:':
            label = 'x'
        if asset_class_id == 'PRF:':
            label = 'x'
        if not label == 'x':
            return_data = return_data +\
                ' <a href="'+\
                burl +'n/?step=d&x='+\
                asset_class_id +\
                '" class="list-group-item list-group-item-action">'+\
                label +'</a>'
    sql = "SELECT market_id, market_label FROM markets order by market_label"
    cursor.execute(sql)
    res = cursor.fetchall()
    for row in res:
        market_id = row[0]
        market_label = row[1]
        label = market_label + ' Market'
        if not label == 'x':
            return_data = return_data +\
                ' <a href="'+\
                burl +\
                'n/?step=d&x='+\
                market_id +\
                '" class="list-group-item list-group-item-action">'+\
                label +'</a>'
    return_data = return_data + '</div>'
    cursor.close()
    connection.close()
    return return_data
def get_selectmarket_box(burl, mode):
    """Render the market-selection panel; wording depends on *mode*
    ('portf' for the portfolio flow, anything else for the generic flow)."""
    if mode == 'portf':
        heading = "Select a market for your portfolio"
        subheading = "Pick from the list below..."
    else:
        heading = "What do you most frequently trade?"
        subheading = "Pick a Market from the list below..."
    fragments = [
        '<div class="box-top">',
        ' <div class="row">',
        ' <div class="col-lg-12 col-md-12 col-sm-12 col-xs-12">',
        ' <div class="box-part rounded sa-center-content">',
        ' <div class="alert alert-success" role="alert">',
        ' <h5><i class="fas fa-chart-line"></i> ',
        heading + '</h5>',
        subheading,
        ' </div><div> </div>',
        get_market_list(burl),
        ' </div>',
        ' </div>',
        ' </div>',
        '</div>',
    ]
    return ''.join(fragments)
def gen_selectmarket_page(appname, burl, mode, terminal):
    """Assemble the full market-selection HTML page (head, navbar, box)."""
    return_data = ''
    return_data = get_head(get_loading_head() +
                           get_googleanalytics() +
                           get_title(appname) +
                           get_metatags(burl) +
                           set_ogp(burl, 1, '', '') +
                           get_bootstrap(get_sa_theme(), burl) +
                           get_font_awesome() +
                           get_stylesheet(burl))
    return_data = return_data + get_body(get_loading_body(), navbar(burl, 0, terminal) +
                                         get_selectmarket_box(burl, mode),'')
    return_data = set_page(return_data)
    return return_data
| StarcoderdataPython |
1685210 | <reponame>Jason-Khan/mmediting
import torch.nn as nn
import torch
from mmedit.models.builder import build_component
from mmedit.models.registry import BACKBONES
import torch.nn.functional as F
from mmseg.core import add_prefix
from mmseg.ops import resize
from ... import builder
from mmcv.runner import auto_fp16, load_checkpoint
from mmedit.utils import get_root_logger
@BACKBONES.register_module()
class SwinuperEncoderDecoder(nn.Module):
    """Encoder-decoder generator built from a backbone (e.g. Swin) and a
    segmentation-style decode head (e.g. UPerHead).

    The input tensor is expected to carry a mask as its last channel; only
    the image channels are fed to the backbone, and the decoded output is
    resized back to the input's spatial size.

    Args:
        backbone (dict): Config dict to build the backbone.
        decode_head (dict): Config dict to build the decode head.
        pretrained (str, optional): Path to pretrained backbone weights.
    """

    def __init__(self,
                 backbone,
                 decode_head,
                 pretrained=None):
        super().__init__()
        self.backbone = builder.build_backbone(backbone)
        self._init_decode_head(decode_head)
        self.init_weights(pretrained=pretrained)
        assert hasattr(self, 'decode_head') and self.decode_head is not None

    def _init_decode_head(self, decode_head):
        """Initialize ``decode_head``"""
        self.decode_head = builder.build_head(decode_head)
        # Mirror the head's resize/class settings for use in forward().
        self.align_corners = self.decode_head.align_corners
        self.num_classes = self.decode_head.num_classes

    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone and heads.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        self.backbone.init_weights(pretrained=pretrained)
        self.decode_head.init_weights()

    def extract_feat(self, img):
        """Extract features from images."""
        x = self.backbone(img)
        return x

    @auto_fp16()
    def forward(self, img):
        """Forward Function.

        Args:
            img (torch.Tensor): Input tensor with shape of (n, c+1, h, w).
                Last channel is mask.

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h', w').
        """
        # Drop the mask channel before feeding the backbone.
        x = self.extract_feat(img[:, :-1])
        out = self.decode_head(x)
        # Upsample the head's output back to the input resolution.
        out = resize(
            input=out,
            size=img.shape[2:],
            mode='bilinear',
            align_corners=self.align_corners)
        return out
| StarcoderdataPython |
6509462 | from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.PipRequirement import PipRequirement
@linter(executable='cppclean',
        output_format='regex',
        output_regex=r'.+:(?P<line>\d+):(?P<message>.*)')
class CPPCleanBear:
    """
    Detect problems in C++ source code that slow down development in large
    code bases, such as unused code, among other checks.

    The full list of available routines is documented at
    <https://github.com/myint/cppclean#features>.
    """

    LANGUAGES = {'C++'}
    REQUIREMENTS = {PipRequirement('cppclean', '0.12.0')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'<EMAIL>'}
    LICENSE = 'AGPL-3.0'
    CAN_DETECT = {'Smell', 'Unused Code', 'Security'}

    @staticmethod
    def create_arguments(filename, file, config_file):
        """Invoke cppclean on just the file under review."""
        return (filename,)
| StarcoderdataPython |
1813787 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class ToughAnimationCasesPage(page_module.Page):
  """One animation stress-test page, optionally gated on a readiness flag."""

  def __init__(self, url, page_set, need_measurement_ready):
    super(ToughAnimationCasesPage, self).__init__(url=url, page_set=page_set)
    self._need_measurement_ready = need_measurement_ready
    self.archive_data_file = 'data/tough_animation_cases.json'

  def RunNavigateSteps(self, action_runner):
    super(ToughAnimationCasesPage, self).RunNavigateSteps(action_runner)
    if not self._need_measurement_ready:
      return
    # The page signals that it is ready to be measured by setting
    # window.measurementReady.
    action_runner.WaitForJavaScriptCondition('window.measurementReady')

  def RunPageInteractions(self, action_runner):
    """Let the animations run for a fixed 10-second window."""
    with action_runner.CreateInteraction('ToughAnimation'):
      action_runner.Wait(10)
class ToughAnimationCasesPageSet(page_set_module.PageSet):

  """A collection of animation performance tests.

  Pages in urls_list_one are added with need_measurement_ready=True (they
  signal readiness via window.measurementReady); pages in urls_list_two are
  added with need_measurement_ready=False.
  """

  def __init__(self):
    super(ToughAnimationCasesPageSet, self).__init__(
      archive_data_file='data/tough_animation_cases.json',
      bucket=page_set_module.PARTNER_BUCKET)

    # NOTE(review): the ?N=0316 / api=... query parameters appear to
    # parameterize the test pages; their exact meaning is defined by the
    # pages themselves — confirm there before changing them.
    urls_list_one = [
      # Why: Tests the balls animation implemented with SVG animations.
      'file://tough_animation_cases/balls_svg_animations.html',
      # Why: Tests the balls animation implemented with Javascript and canvas.
      'file://tough_animation_cases/balls_javascript_canvas.html',
      # Why: Tests the balls animation implemented with Javascript and CSS.
      'file://tough_animation_cases/balls_javascript_css.html',
      # Why: Tests the balls animation implemented with CSS keyframe animations.
      'file://tough_animation_cases/balls_css_keyframe_animations.html',
      # Why: Tests the balls animation implemented with transforms and CSS
      # keyframe animations to be run on the compositor thread.
      # pylint: disable=C0301
      'file://tough_animation_cases/balls_css_keyframe_animations_composited_transform.html',
      # Why: Tests the balls animation implemented with CSS transitions on 2
      # properties.
      'file://tough_animation_cases/balls_css_transition_2_properties.html',
      # Why: Tests the balls animation implemented with CSS transitions on 40
      # properties.
      'file://tough_animation_cases/balls_css_transition_40_properties.html',
      # Why: Tests the balls animation implemented with CSS transitions on all
      # animatable properties.
      'file://tough_animation_cases/balls_css_transition_all_properties.html',
      # pylint: disable=C0301
      'file://tough_animation_cases/overlay_background_color_css_transitions.html',
      # Why: Tests many CSS Transitions all starting at the same time triggered
      # by inserting new elements.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_transitions_simultaneous_by_inserting_new_element.html?N=0316',
      # Why: Tests many CSS Transitions all starting at the same time triggered
      # by inserting style sheets.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_transitions_simultaneous_by_inserting_style_element.html?N=0316',
      # Why: Tests many CSS Transitions all starting at the same time triggered
      # by updating class.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_transitions_simultaneous_by_updating_class.html?N=0316',
      # Why: Tests many CSS Transitions all starting at the same time triggered
      # by updating inline style.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_transitions_simultaneous_by_updating_inline_style.html?N=0316',
      # Why: Tests many CSS Transitions chained together using events at
      # different times triggered by inserting new elements.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_transitions_staggered_chaining_by_inserting_new_element.html?N=0316',
      # Why: Tests many CSS Transitions chained together using events at
      # different times triggered by inserting style sheets.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_transitions_staggered_chaining_by_inserting_style_element.html?N=0316',
      # Why: Tests many CSS Transitions chained together using events at
      # different times triggered by updating class.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_transitions_staggered_chaining_by_updating_class.html?N=0316',
      # Why: Tests many CSS Transitions chained together using events at
      # different times triggered by updating inline style.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_transitions_staggered_chaining_by_updating_inline_style.html?N=0316',
      # Why: Tests many CSS Transitions starting at different times triggered by
      # inserting new elements.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_transitions_staggered_triggering_by_inserting_new_element.html?N=0316',
      # Why: Tests many CSS Transitions starting at different times triggered by
      # inserting style sheets.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_transitions_staggered_triggering_by_inserting_style_element.html?N=0316',
      # Why: Tests many CSS Transitions starting at different times triggered by
      # updating class.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_transitions_staggered_triggering_by_updating_class.html?N=0316',
      # Why: Tests many CSS Transitions starting at different times triggered by
      # updating inline style.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_transitions_staggered_triggering_by_updating_inline_style.html?N=0316',
      # Why: Tests many CSS Animations all starting at the same time with 500
      # keyframes each.
      'file://tough_animation_cases/css_animations_many_keyframes.html?N=0316',
      # Why: Tests many CSS Animations all starting at the same time triggered
      # by inserting new elements.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_animations_simultaneous_by_inserting_new_element.html?N=0316',
      # Why: Tests many CSS Animations all starting at the same time triggered
      # by inserting style sheets.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_animations_simultaneous_by_inserting_style_element.html?N=0316',
      # Why: Tests many CSS Animations all starting at the same time triggered
      # by updating class.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_animations_simultaneous_by_updating_class.html?N=0316',
      # Why: Tests many CSS Animations all starting at the same time triggered
      # by updating inline style.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_animations_simultaneous_by_updating_inline_style.html?N=0316',
      # Why: Tests many CSS Animations chained together using events at
      # different times triggered by inserting new elements.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_animations_staggered_chaining_by_inserting_new_element.html?N=0316',
      # Why: Tests many CSS Animations chained together using events at
      # different times triggered by inserting style sheets.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_animations_staggered_chaining_by_inserting_style_element.html?N=0316',
      # Why: Tests many CSS Animations chained together using events at
      # different times triggered by updating class.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_animations_staggered_chaining_by_updating_class.html?N=0316',
      # Why: Tests many CSS Animations chained together using events at
      # different times triggered by updating inline style.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_animations_staggered_chaining_by_updating_inline_style.html?N=0316',
      # Why: Tests many CSS Animations starting at different times triggered by
      # inserting new elements.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_animations_staggered_triggering_by_inserting_new_element.html?N=0316',
      # Why: Tests many CSS Animations all starting at the same time with
      # staggered animation offsets.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_animations_staggered_infinite_iterations.html?N=0316',
      # Why: Tests many CSS Animations starting at different times triggered by
      # inserting style sheets.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_animations_staggered_triggering_by_inserting_style_element.html?N=0316',
      # Why: Tests many CSS Animations starting at different times triggered by
      # updating class.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_animations_staggered_triggering_by_updating_class.html?N=0316',
      # Why: Tests many CSS Animations starting at different times triggered by
      # updating inline style.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_animations_staggered_triggering_by_updating_inline_style.html?N=0316',
      # Why: Tests many Web Animations all starting at the same time with 500
      # keyframes each.
      'file://tough_animation_cases/web_animations_many_keyframes.html?N=0316',
      # Why: Tests many paused Web Animations having their currentTimes updated
      # in every requestAnimationFrame.
      # pylint: disable=C0301
      'file://tough_animation_cases/web_animations_set_current_time_in_raf.html?N=0316',
      # Why: Tests many Web Animations all starting at the same time.
      'file://tough_animation_cases/web_animations_simultaneous.html?N=0316',
      # Why: Tests many Web Animations all starting at different times then
      # chained together using events.
      # pylint: disable=C0301
      'file://tough_animation_cases/web_animations_staggered_chaining.html?N=0316',
      # Why: Tests many Web Animations all starting at different times with
      # infinite iterations.
      # pylint: disable=C0301
      'file://tough_animation_cases/web_animations_staggered_infinite_iterations.html?N=0316',
      # Why: Tests many Web Animations all starting at different times.
      # pylint: disable=C0301
      'file://tough_animation_cases/web_animations_staggered_triggering.html?N=0316',
      # Why: Tests color animations using CSS Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_color.html?api=css_animations&N=0316',
      # Why: Tests filter animations using CSS Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_filter.html?api=css_animations&N=0316',
      # Why: Tests length 3D animations using CSS Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_length_3d.html?api=css_animations&N=0316',
      # Why: Tests complex length animations using CSS Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_length_complex.html?api=css_animations&N=0316',
      # Why: Tests simple length animations using CSS Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_length_simple.html?api=css_animations&N=0316',
      # Why: Tests shadow animations using CSS Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_shadow.html?api=css_animations&N=0316',
      # Why: Tests complex transform animations using CSS Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_transform_complex.html?api=css_animations&N=0316',
      # Why: Tests simple transform animations using CSS Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_transform_simple.html?api=css_animations&N=0316',
      # Why: Tests color animations using Web Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_color.html?api=web_animations&N=0316',
      # Why: Tests length 3D animations using Web Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_length_3d.html?api=web_animations&N=0316',
      # Why: Tests complex length animations using Web Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_length_complex.html?api=web_animations&N=0316',
      # Why: Tests simple length animations using Web Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_length_simple.html?api=web_animations&N=0316',
      # Why: Tests shadow animations using Web Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_shadow.html?api=web_animations&N=0316',
      # Why: Tests complex transform animations using Web Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_transform_complex.html?api=web_animations&N=0316',
      # Why: Tests simple transform animations using Web Animations.
      # pylint: disable=C0301
      'file://tough_animation_cases/css_value_type_transform_simple.html?api=web_animations&N=0316',
    ]

    for url in urls_list_one:
      self.AddUserStory(ToughAnimationCasesPage(url, self,
                                                need_measurement_ready=True))

    urls_list_two = [
      # Why: Tests various keyframed animations.
      'file://tough_animation_cases/keyframed_animations.html',
      # Why: Tests various transitions.
      'file://tough_animation_cases/transform_transitions.html',
      # Why: JS execution blocks CSS transition unless initial transform is set.
      'file://tough_animation_cases/transform_transition_js_block.html'

      # Disabled: crbug.com/350692
      # Why: Login page is slow because of inefficient transform operations.
      # 'http://ie.microsoft.com/testdrive/performance/robohornetpro/',
    ]

    for url in urls_list_two:
      self.AddUserStory(ToughAnimationCasesPage(url, self,
                                                need_measurement_ready=False))
| StarcoderdataPython |
5047405 | <reponame>poldracklab/bids-core
import os
import copy
import logging
import pymongo
import datetime
# Process-wide logging configuration; ``log`` is the logger used throughout
# this module.
logging.basicConfig(
    format='%(asctime)s %(name)16.16s %(filename)24.24s %(lineno)5d:%(levelname)4.4s %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.DEBUG,
)
log = logging.getLogger('scitran.api')

logging.getLogger('MARKDOWN').setLevel(logging.WARNING)         # silence Markdown library
logging.getLogger('requests').setLevel(logging.WARNING)         # silence Requests library
logging.getLogger('paste.httpserver').setLevel(logging.WARNING) # silence Paste library
# NOTE: Keep in sync with environment variables in sample.config file.
# Default configuration; individual entries can be overridden via
# SCITRAN_* environment variables (see the loop below).
DEFAULT_CONFIG = {
    # Process-wide behavior flags.
    'core': {
        'log_level': 'info',
        'debug': False,
        'insecure': False,
        'newrelic': None,
        'drone_secret': None,
    },
    # Identity of this deployment and of the central registry.
    'site': {
        'id': 'local',
        'name': 'Local',
        'url': 'https://localhost/api',
        'central_url': 'https://sdmc.scitran.io/api',
        'registered': False,
        'ssl_cert': None,
    },
    # OAuth provider endpoints used for authentication.
    'auth': {
        'google': {
            'client_id': '1052740023071-n20pk8h5uepdua3r8971pc6jrf25lvee.apps.googleusercontent.com',
            'id_endpoint': 'https://www.googleapis.com/plus/v1/people/me/openIdConnect',
            'auth_endpoint': 'https://accounts.google.com/o/oauth2/auth',
            'verify_endpoint': 'https://www.googleapis.com/oauth2/v1/tokeninfo',
        },
        'orcid': {
            'client_id': 'APP-B03HAPXN425Y5C95',
            'api_endpoint': 'https://pub.orcid.org',
        },
    },
    # Database connection and on-disk data location.
    'persistent': {
        'db_uri': 'mongodb://localhost:9001/scitran',
        'db_connect_timeout': '2000',
        'db_server_selection_timeout': '3000',
        'data_path': os.path.join(os.path.dirname(__file__), '../persistent/data'),
    },
}
# Live configuration: starts as a deep copy of the defaults, is mutated in
# place by the environment-variable overrides below, and is replaced by the
# database copy in get_config().
__config = copy.deepcopy(DEFAULT_CONFIG)
# True once the configuration has been written to the database.
__config_persisted = False
# Time of the last persist/refresh; the Unix epoch means "never".
__last_update = datetime.datetime.utcfromtimestamp(0)

#FIXME What is this?
#os.environ['PYTHON_EGG_CACHE'] = '/tmp/python_egg_cache'
#os.umask(0o022)
def _coerce_env_value(value):
    """Map the strings 'true'/'false'/'none' (any case) to True/False/None.

    Any other string is returned unchanged. Mirrors how boolean/null values
    are spelled in environment variables.
    """
    lowered = value.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    if lowered == 'none':
        return None
    return value

# Environment variables named SCITRAN_<SECTION>_<KEY>[_<SUBKEY>] override the
# corresponding entries of the default configuration.
for outer_key, scoped_config in __config.iteritems():
    for inner_key in scoped_config:
        if type(scoped_config[inner_key]) is dict:
            # Two-level entries, e.g. SCITRAN_AUTH_GOOGLE_CLIENT_ID.
            for inner_key_field in scoped_config[inner_key]:
                key = 'SCITRAN_' + outer_key.upper() + '_' + inner_key.upper() + '_' + inner_key_field.upper()
                if key in os.environ:
                    __config[outer_key][inner_key][inner_key_field] = _coerce_env_value(os.environ[key])
        else:
            # Single-level entries, e.g. SCITRAN_CORE_LOG_LEVEL.
            key = 'SCITRAN_' + outer_key.upper() + '_' + inner_key.upper()
            if key in os.environ:
                __config[outer_key][inner_key] = _coerce_env_value(os.environ[key])
# Make sure the on-disk data directory exists before serving requests.
if not os.path.exists(__config['persistent']['data_path']):
    os.makedirs(__config['persistent']['data_path'])

# Apply the configured log level (the initial basicConfig used DEBUG).
log.setLevel(getattr(logging, __config['core']['log_level'].upper()))

db = pymongo.MongoClient(
    __config['persistent']['db_uri'],
    j=True, # Requests only return once write has hit the DB journal
    connectTimeoutMS=__config['persistent']['db_connect_timeout'],
    serverSelectionTimeoutMS=__config['persistent']['db_server_selection_timeout'],
    connect=False, # Connect on first operation to avoid multi-threading related errors
).get_default_database()
log.debug(str(db))
def initialize_db():
    """Ensure database indexes and baseline documents exist.

    Safe to call repeatedly: indexes are only built when the database has
    none yet, and the 'unknown' group and this site's document are upserted.
    """
    log.info('Initializing database')
    if not db.system.indexes.find_one():
        log.info('Creating database indexes')
        # TODO jobs indexes
        # TODO review all indexes
        db.projects.create_index([('gid', 1), ('name', 1)])
        db.sessions.create_index('project')
        db.sessions.create_index('uid')
        db.acquisitions.create_index('session')
        db.acquisitions.create_index('uid')
        db.acquisitions.create_index('collections')
        db.analytics.create_index('timestamp')
        # TTL indexes: these documents expire automatically after the given
        # number of seconds.
        db.authtokens.create_index('timestamp', expireAfterSeconds=600)
        db.uploads.create_index('timestamp', expireAfterSeconds=60)
        db.downloads.create_index('timestamp', expireAfterSeconds=60)
    now = datetime.datetime.utcnow()
    db.groups.update_one({'_id': 'unknown'}, {'$setOnInsert': { 'created': now, 'modified': now, 'name': 'Unknown', 'roles': []}}, upsert=True)
    db.sites.replace_one({'_id': __config['site']['id']}, {'name': __config['site']['name'], 'site_url': __config['site']['url']}, upsert=True)
def get_config():
    """Return the current configuration, persisting or refreshing it.

    On the first successful call the in-memory configuration is written to
    the database; afterwards the cached copy is refreshed from the database
    at most once every 120 seconds, re-applying the configured log level.
    """
    global __last_update, __config, __config_persisted
    now = datetime.datetime.utcnow()
    if not __config_persisted:
        initialize_db()
        log.info('Persisting configuration')
        __config['created'] = __config['modified'] = now
        __config['latest'] = True
        r = db.config.replace_one({'latest': True}, __config, upsert=True)
        # The very first persist is an upsert *insert*: modified_count is 0
        # but upserted_id is set. Both outcomes mean the config is stored;
        # checking only modified_count would rewrite (and re-stamp) the
        # config on every call after a fresh install.
        __config_persisted = bool(r.modified_count) or r.upserted_id is not None
        __last_update = now
    elif now - __last_update > datetime.timedelta(seconds=120):
        log.debug('Refreshing configuration from database')
        __config = db.config.find_one({'latest': True})
        __last_update = now
        log.setLevel(getattr(logging, __config['core']['log_level'].upper()))
    return __config
def get_public_config():
    """Return only the configuration sections safe to expose publicly."""
    public_keys = ('created', 'modified', 'site', 'auth')
    return {key: __config.get(key) for key in public_keys}
def get_item(outer, inner):
    """Look up one configuration value, e.g. get_item('site', 'id')."""
    config = get_config()
    return config[outer][inner]
| StarcoderdataPython |
6650607 | <filename>vkmodels/objects/comment.py
import dataclasses
import enum
import typing
from vkmodels.bases.object import ObjectBase
@dataclasses.dataclass
class Thread(
    ObjectBase,
):
    """Thread of replies attached to a comment."""

    # Total number of comments in the thread.
    count: int
    can_post: typing.Optional[bool] = None
    groups_can_post: typing.Optional[bool] = None
    # 'WallComment' must be a string (PEP 484 forward reference): the class
    # is not imported in this module and there is no
    # `from __future__ import annotations`, so a bare name would raise
    # NameError while the annotations are evaluated at class-creation time.
    items: typing.Optional[typing.List['WallComment']] = None
    show_reply_button: typing.Optional[bool] = None
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.