id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3327606 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 18:46:41 2019
@author: JVM
"""
import requests, tempfile, os
#converter api help http://synbiodex.github.io/SBOL-Validator/?javascript#introduction
def DNA_to_GenBank(filename, partname):
    """Fetch a SnapGene ``.dna`` file and convert it to SBOL2.

    Downloads the file at *filename*, uploads it to the song.ece.utah.edu
    converter service, retrieves the generated GenBank text, and sends it
    to the SBOL validator for conversion to SBOL2.

    Args:
        filename: URL of the source ``.dna`` file.
        partname: Unused; kept for call-site compatibility.

    Returns:
        The converted SBOL2 document from the validator's ``result`` field.
    """
    upload_url = "http://song.ece.utah.edu/examples/pages/acceptNewFile.php"
    handle = tempfile.NamedTemporaryFile(suffix=".dna")
    # The conversion service exposes the uploaded file under its temp-file
    # basename (without the ".dna" suffix).
    base_url = "http://song.ece.utah.edu/dnafiles/" + os.path.basename(handle.name)[:-4]
    handle.write(requests.get(filename).content)
    handle.flush()
    handle.seek(0)
    # Upload the file to the converter service.
    requests.post(upload_url, files={'fileToUpload': handle},
                  headers={"Accept": "text/plain"})
    handle.close()
    # Fetch the GenBank rendering produced by the service.
    genbank_text = requests.get(f"{base_url}.gb").text
    payload = {
        'options': {
            'language': 'SBOL2',
            'test_equality': False,
            'check_uri_compliance': False,
            'check_completeness': False,
            'check_best_practices': False,
            'fail_on_first_error': False,
            'provide_detailed_stack_trace': False,
            'uri_prefix': 'trial',
            'version': '',
            'insert_type': False,
        },
        'return_file': True,
        'main_file': genbank_text,
    }
    response = requests.post("https://validator.sbolstandard.org/validate/",
                             json=payload)
    return response.json()["result"]
| StarcoderdataPython |
8557 | <filename>mpunet/bin/cv_split.py
from glob import glob
import sys
import os
import numpy as np
import random
from mpunet.utils import create_folders
import argparse
def get_parser():
    """Build the argument parser for the CV split preparation script.

    Returns:
        argparse.ArgumentParser: parser exposing data/output directories,
        split counts, fractions and copy/symlink/file-list behavior flags.
    """
    # NOTE: adjacent string literals are concatenated; the original was
    # missing the space here ("for aCV experiment setup").
    parser = argparse.ArgumentParser(description="Prepare a data folder for a "
                                                 "CV experiment setup.")
    parser.add_argument("--data_dir", type=str,
                        help="Path to data directory")
    parser.add_argument("--CV", type=int, default=5,
                        help="Number of splits (default=5)")
    parser.add_argument("--out_dir", type=str, default="views",
                        help="Directory to store CV subfolders "
                             "(default=views)")
    parser.add_argument("--im_sub_dir", type=str, default="images",
                        help="Subfolder under 'data_dir' in which images are "
                             "stored (default=images)")
    parser.add_argument("--lab_sub_dir", type=str, default="labels",
                        help="Subfolder under 'data_dir' in which labels are "
                             "stored (default=labels)")
    parser.add_argument("--copy", action="store_true",
                        help="Copy files to CV-subfolders instead of "
                             "symlinking (not recommended)")
    parser.add_argument("--file_list", action="store_true",
                        help="Create text files with paths pointing to the "
                             "images at the image and labels subdirs under "
                             "each split instead of symlink/copying. This is"
                             " useful on systems where symlink is not "
                             "supported, but the dataset size is too large to"
                             " store in copies. NOTE: Only one of --copy and "
                             "--file_list flags must be set.")
    parser.add_argument("--file_regex", type=str, default="*.nii*",
                        help="Regex used to select files from the image "
                             "and labels subdirs. (default='*.nii*')")
    parser.add_argument("--validation_fraction", type=float, default=0.20,
                        help="Fraction of OVERALL data size used for "
                             "validation in each split. In a 5-CV setting with "
                             "N=100 and val_frac=0.20, each split will have "
                             "N_train=60, N_val=20 and N_test=20 images")
    parser.add_argument("--test_fraction", type=float, default=0.20,
                        help="Fraction of data size used for test if CV=1.")
    parser.add_argument("--common_prefix_length", type=int, required=False,
                        default=0)
    return parser
def assert_dir_structure(data_dir, im_dir, lab_dir, out_dir):
    """Validate the expected directory layout before splitting.

    Args:
        data_dir: Root data directory (must exist).
        im_dir: Image sub-directory (must exist).
        lab_dir: Label sub-directory (must exist).
        out_dir: Output directory (must NOT already exist).

    Raises:
        OSError: if any input directory is missing, or out_dir exists.
    """
    for _dir in (data_dir, im_dir, lab_dir):
        if not os.path.exists(_dir):
            # BUG FIX: report the directory that actually failed the check;
            # the original always interpolated 'data_dir' here.
            raise OSError("Invalid data directory '%s'. Does not exist." % _dir)
    if os.path.exists(out_dir):
        raise OSError("Output directory at '%s' already exists." % out_dir)
def create_view_folders(out_dir, n_splits):
    """Create the output directory and, when ``n_splits > 1``, one
    ``split_<i>`` sub-directory per split.

    Args:
        out_dir: Root output directory (created if missing).
        n_splits: Number of CV splits.
    """
    if not os.path.exists(out_dir):
        print("Creating directory at %s" % out_dir)
        os.makedirs(out_dir)
    if n_splits <= 1:
        # A fixed single split uses out_dir directly -- no sub-folders.
        return
    for split_idx in range(n_splits):
        sub_dir = os.path.join(out_dir, "split_%i" % split_idx)
        print("Creating directory at %s" % sub_dir)
        os.mkdir(sub_dir)
def pair_by_names(images, common_prefix_length):
    """Group image paths whose basenames share a common prefix.

    Files whose first ``common_prefix_length`` basename characters match
    (e.g. the same subject identifier) are grouped into one tuple.

    Args:
        images: List of image file paths.
        common_prefix_length: Number of leading basename characters that
            identify a subject; 0 disables pairing.

    Returns:
        The input list unchanged when ``common_prefix_length == 0``;
        otherwise a list of tuples, one tuple per shared prefix.
    """
    if common_prefix_length == 0:
        return images
    from collections import defaultdict
    grouped = defaultdict(list)
    for index, path in enumerate(images):
        identifier = os.path.split(path)[-1][:common_prefix_length]
        grouped[identifier].append(index)
    image_arr = np.array(images)
    return [tuple(image_arr[index_list]) for index_list in grouped.values()]
def add_images(images, im_folder_path, label_folder_path, im_dir, lab_dir,
               link_func=os.symlink):
    """Place each image and its identically-named label into split folders.

    Args:
        images: List of image paths, or of tuples of paths (paired files).
        im_folder_path: Destination folder for image links/copies.
        label_folder_path: Destination folder for label links/copies.
        im_dir: Source image directory (substituted to derive label paths).
        lab_dir: Source label directory.
        link_func: Callable ``(src, dst)`` used to materialize each file
            (symlink by default; may be copyfile or a file-list writer).

    Raises:
        OSError: when an image has no label file of the same name.
    """
    for entry in images:
        # Wrap bare paths so paired tuples and single files share one path.
        if isinstance(entry, (list, tuple, np.ndarray)):
            members = entry
        else:
            members = (entry,)
        for image_path in members:
            file_name = os.path.split(image_path)[-1]
            # Label path mirrors the image path (filenames must match).
            label_path = image_path.replace(im_dir, lab_dir)
            if not os.path.exists(label_path):
                raise OSError("No label file found at '%s'. OBS: image and "
                              "label files must have exactly the same name. "
                              "Images should be located at '%s' and labels at"
                              " '%s'" % (label_path, im_folder_path,
                                         label_folder_path))
            # Relative paths keep the links valid if the tree is moved.
            rel_image = os.path.relpath(image_path, im_folder_path)
            rel_label = os.path.relpath(label_path, label_folder_path)
            link_func(rel_image, im_folder_path + "/%s" % file_name)
            link_func(rel_label, label_folder_path + "/%s" % file_name)
def _add_to_file_list_fallback(rel_image_path, image_path,
fname="LIST_OF_FILES.txt"):
"""
On some system synlinks are not supported, if --files_list flag is set,
uses this function to add each absolute file path to a list at the final
subfolder that is supposed to store images and label links or actual files
At run-time, these files must be loaded by reading in the path from these
files instead.
"""
# Get folder where list of files should be stored
folder = os.path.split(image_path)[0]
# Get absolute path to image
# We change dir to get the correct abs path from the relative
os.chdir(folder)
abs_file_path = os.path.abspath(rel_image_path)
# Get path to the list of files
list_file_path = os.path.join(folder, fname)
with open(list_file_path, "a") as out_f:
out_f.write(abs_file_path + "\n")
def entry_func(args=None):
    """Split a data folder into train/val/test sub-folders for CV.

    Parses CLI arguments (see ``get_parser``), validates the directory
    layout, then distributes image/label files into ``CV`` split folders
    via symlink, copy or a file-list fallback.

    Args:
        args: Optional list of CLI arguments (defaults to ``sys.argv``).

    Raises:
        ValueError: on inconsistent flags or fractions that leave no
            training samples.
        OSError: from ``assert_dir_structure`` on bad directory layout.
    """
    # Get parser
    parser = vars(get_parser().parse_args(args))

    # Get arguments
    data_dir = os.path.abspath(parser["data_dir"])
    n_splits = int(parser["CV"])
    if n_splits > 1:
        out_dir = os.path.join(data_dir, parser["out_dir"], "%i_CV" % n_splits)
    else:
        out_dir = os.path.join(data_dir, parser["out_dir"], "fixed_split")
    im_dir = os.path.join(data_dir, parser["im_sub_dir"])
    lab_dir = os.path.join(data_dir, parser["lab_sub_dir"])
    copy = parser["copy"]
    file_list = parser["file_list"]
    regex = parser["file_regex"]
    val_frac = parser["validation_fraction"]
    test_frac = parser["test_fraction"]
    common_prefix_length = parser["common_prefix_length"]

    if n_splits == 1 and not test_frac:
        raise ValueError("Must specify --test_fraction with --CV=1.")
    if copy and file_list:
        raise ValueError("Only one of --copy and --file_list "
                         "flags must be set.")

    # Assert suitable folders
    assert_dir_structure(data_dir, im_dir, lab_dir, out_dir)

    # Create sub-folders
    create_view_folders(out_dir, n_splits)

    # Get images and pair by subject identifier if common_prefix_length > 0
    images = glob(os.path.join(im_dir, regex))
    images = pair_by_names(images, common_prefix_length)
    print("-----")
    print("Found {} images".format(len(images)))

    # Get validation size
    N_total = len(images)
    if n_splits > 1:
        N_test = N_total // n_splits
    else:
        N_test = int(np.ceil(N_total * test_frac))
    N_val = int(np.ceil(N_total * val_frac))
    if N_val + N_test >= N_total:
        raise ValueError("Too large validation_fraction - "
                         "No training samples left!")
    N_train = N_total - N_test - N_val
    print("Total images:".ljust(40), N_total)
    print("Train images pr. split:".ljust(40), N_train)
    print("Validation images pr. split:".ljust(40), N_val)
    print("Test images pr. split:".ljust(40), N_test)

    # Shuffle and split the images into CV parts
    random.shuffle(images)
    splits = np.array_split(images, n_splits)

    # Symlink / copy files
    for i, split in enumerate(splits):
        print("  Split %i/%i" % (i + 1, n_splits), end="\r", flush=True)

        # Set root path to split folder
        if n_splits > 1:
            split_path = os.path.join(out_dir, "split_%i" % i)
        else:
            split_path = out_dir

        # BUG FIX: this hack must only run for CV=1; the flattened source
        # had lost the guard, which would clobber every split's data.
        if n_splits == 1:
            # Here we kind of hacky force the following code to work with
            # CV=1: define a test set and overwrite the current split (which
            # stores all the data, as splits was never split with n_splits=1).
            split = splits[0][:N_test]
            # Overwrite the splits variable to a length 2 array with the
            # remaining data which will be used as val+train. The loop still
            # refers to the old split and thus will only execute once.
            splits = [split, splits[0][N_test:]]

        # Define train, val and test sub-dirs
        train_path = os.path.join(split_path, "train")
        train_im_path = os.path.join(train_path, parser["im_sub_dir"])
        train_label_path = os.path.join(train_path, parser["lab_sub_dir"])
        if N_val:
            val_path = os.path.join(split_path, "val")
            val_im_path = os.path.join(val_path, parser["im_sub_dir"])
            val_label_path = os.path.join(val_path, parser["lab_sub_dir"])
        else:
            val_path, val_im_path, val_label_path = (None,) * 3
        test_path = os.path.join(split_path, "test")
        test_im_path = os.path.join(test_path, parser["im_sub_dir"])
        test_label_path = os.path.join(test_path, parser["lab_sub_dir"])

        # Create folders if not existing
        create_folders([train_path, val_path, train_im_path, train_label_path,
                        val_im_path, val_label_path, test_path, test_im_path,
                        test_label_path])

        # Copy or symlink?
        if copy:
            from shutil import copyfile
            move_func = copyfile
        elif file_list:
            move_func = _add_to_file_list_fallback
        else:
            move_func = os.symlink

        # Add test data to test folder
        add_images(split, test_im_path, test_label_path,
                   im_dir, lab_dir, move_func)

        # Join remaining splits into train+val
        remaining = [x for ind, x in enumerate(splits) if ind != i]
        remaining = [item for sublist in remaining for item in sublist]

        # Extract validation data from the remaining
        random.shuffle(remaining)
        validation = remaining[:N_val]
        training = remaining[N_val:]

        # Add
        if validation:
            add_images(validation, val_im_path, val_label_path,
                       im_dir, lab_dir, move_func)
        add_images(training, train_im_path, train_label_path,
                   im_dir, lab_dir, move_func)


if __name__ == "__main__":
    entry_func()
| StarcoderdataPython |
3302524 | from django.shortcuts import render
from .models import ShortURL
from .forms import CreateNewShortURL
from datetime import datetime
import random, string
# Create your views here.
def home(request):
    """Render the landing page."""
    return render(request, 'home.html')
def redirect(request, url):
    """Resolve a short code and render its redirect page.

    Looks up *url* (the short code) in ShortURL; renders a not-found page
    when the code is unknown, otherwise the redirect template with the
    matching row in context.
    """
    matches = ShortURL.objects.filter(short_url=url)
    if not matches:
        return render(request, 'pagenotfound.html')
    return render(request, 'redirect.html', {'obj': matches[0]})
def createShortURL(request):
    """Handle the short-URL creation form.

    GET: render an empty creation form.
    POST: validate the form, generate a unique 6-letter code, persist the
    ShortURL row and render the confirmation page. Invalid POSTs re-render
    the form with its errors.
    """
    if request.method == 'POST':
        form = CreateNewShortURL(request.POST)
        if form.is_valid():
            original_website = form.cleaned_data['original_url']
            random_chars_list = list(string.ascii_letters)
            random_chars = ''.join(random.choice(random_chars_list)
                                   for _ in range(6))
            # BUG FIX: on a collision the original APPENDED six more
            # characters (yielding 12/18/...-char codes) instead of
            # regenerating a fresh 6-character code.
            while len(ShortURL.objects.filter(short_url=random_chars)) != 0:
                random_chars = ''.join(random.choice(random_chars_list)
                                       for _ in range(6))
            d = datetime.now()
            s = ShortURL(original_url=original_website,
                         short_url=random_chars, time_date_created=d)
            s.save()
            return render(request, 'urlcreated.html', {'chars': random_chars})
    else:
        form = CreateNewShortURL()
    context = {'form': form}
    return render(request, 'create.html', context)
| StarcoderdataPython |
3242983 | <reponame>Fuligor/Uczenie-sie-rekonstrukcji-rozdzielczosci-obrazow-za-pomoca-sieci-glebokich<gh_stars>0
import numpy as np
from scipy import signal
from PIL import Image
def resample(hr_image):
    """Return a copy of *hr_image* keeping every second row and column.

    Works on arrays of rank >= 2 (e.g. H x W or H x W x C); only the first
    two axes are subsampled.
    """
    # .copy() matches the original fancy-indexing behavior, which also
    # returned a new array rather than a view.
    return hr_image[::2, ::2].copy()
def downsample(hr_image, kernel):
    """Blur, 2x box-downsample, then nearest-neighbour upsample an image.

    Produces a low-resolution version of *hr_image* at the ORIGINAL spatial
    size (useful as network input paired with the HR target).

    Args:
        hr_image: H x W x C image array; assumes 3 channels (converted with
            mode='RGB') and float values scaled to [0, 1] when not uint8 --
            TODO confirm with callers.
        kernel: 2-D convolution kernel used as the anti-aliasing blur.

    Returns:
        float32 array in [0, 1] with the same height/width as ``hr_image``.
    """
    temp = np.zeros_like(hr_image)
    # Channel-wise 2-D convolution with symmetric boundary padding.
    for i in range(temp.shape[2]):
        temp[:, :, i] = signal.convolve2d(hr_image[:, :, i], kernel,
                                          mode="same", boundary="symm")
    # PIL needs uint8 input; non-uint8 inputs are assumed to be in [0, 1].
    if temp.dtype != np.uint8:
        temp = (temp * 255).astype(np.uint8)
    temp = Image.fromarray(temp, mode='RGB')
    # PIL sizes are (width, height), hence the swapped shape indices.
    size = (hr_image.shape[1]//2, hr_image.shape[0]//2)
    lr_image = temp.resize(size=size, resample=Image.BOX)
    lr_image = lr_image.resize(size=(hr_image.shape[1], hr_image.shape[0]),
                               resample=Image.NEAREST)
    return (np.array(lr_image) / 255).astype(np.float32)
def create_image_patches(image, patch_size, step):
    """Extract sliding-window patches from *image*.

    Args:
        image: Array of rank >= 2; patching runs over the first two axes.
        patch_size: (height, width) of each patch.
        step: Stride between consecutive patch origins.

    Returns:
        List of patch views of shape ``patch_size`` (plus trailing axes).
    """
    patches = []
    # BUG FIX (off-by-one): the upper bound must be ``shape - patch + 1`` so
    # the last fully-contained patch position is included; the original
    # ``shape - patch`` bound always skipped it.
    for i in range(0, image.shape[0] - patch_size[0] + 1, step):
        for j in range(0, image.shape[1] - patch_size[1] + 1, step):
            patches.append(image[i:i + patch_size[0], j:j + patch_size[1]])
    return patches
| StarcoderdataPython |
113560 | <reponame>kbiters/infram
from time import time, sleep
import pyautogui
from src.json import config
from src.operations.mouse import find_image_click
from src.operations.windows import start_brave
from src.service.auto_update import latest_version_check
from src.service.constants import Config, Image
from src.service.main_window import show_main_window
from src.service.menu import show_menu
from src.service.translator import select_language
from src.service.utilities import check_data_created, check_credentials, check_finish_bot
def main():
    """Entry point for the automation bot.

    One-time setup (language, config/credential checks, UI windows), then an
    endless loop that launches the browser and repeatedly clicks the
    notification image. Only ``check_finish_bot`` can end the run.
    """
    # latest_version_check()  <-- kept disabled so the auto-updater is not used
    Config.LANGUAGE = select_language()
    # Disable pyautogui's corner-abort so moving the mouse to (0, 0)
    # does not raise during automation.
    pyautogui.FAILSAFE = False
    check_data_created()
    check_credentials()
    show_menu()
    show_main_window()
    time_to_repeat = config.get_time_to_repeat()
    initial_time = time()
    while True:
        check_finish_bot(initial_time)
        start_brave()
        # i: seconds waited in the current 30 s chunk; j: remaining time
        # budget for this browser session; waiting: inner-loop flag.
        i, j, waiting = 0, time_to_repeat, True
        while waiting:
            find_image_click(Image.NOTIFICATION_PATH, -100, 100, -8, 8, True)
            sleep(10)
            i += 10
            if i >= 30:
                # Every 30 s, subtract the elapsed chunk from the budget;
                # when it is spent, restart the outer loop (new session).
                j, i = j - i, 0
                if j <= 0:
                    waiting = False


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3317131 | # Copyright 2019 Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cyborg.common import exception
from cyborg.common.i18n import _
from cyborg.common import utils
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class NovaAPI(object):
    """Thin client for Nova's os-server-external-events API.

    Used to notify Nova when accelerator request (ARQ) bindings have been
    resolved for an instance.
    """

    def __init__(self):
        # Compute-service adapter from the common SDK helper; microversion
        # 2.82 is required for 'accelerator-request-bound' events.
        self.nova_client = utils.get_sdk_adapter('compute')
        self.nova_client.default_microversion = '2.82'

    def _get_acc_changed_events(self, instance_uuid, arq_bind_statuses):
        # One 'accelerator-request-bound' event per ARQ; the ARQ UUID goes
        # in 'tag' so Nova can correlate the event with the request.
        return [{'name': 'accelerator-request-bound',
                 'server_uuid': instance_uuid,
                 'tag': arq_uuid,
                 'status': arq_bind_status,
                 } for (arq_uuid, arq_bind_status) in arq_bind_statuses]

    def _send_events(self, events):
        """Send events to Nova external events API.

        :param events: List of events to send to Nova.
        :raises: exception.InvalidAPIResponse, on unexpected error
        """
        url = "/os-server-external-events"
        body = {"events": events}
        response = self.nova_client.post(url, json=body)
        # NOTE(Sundar): Response status should always be 200/207. See
        # https://review.opendev.org/#/c/698037/
        if response.status_code == 200:
            LOG.info("Sucessfully sent events to Nova, events: %(events)s",
                     {"events": events})
        elif response.status_code == 207:
            # 207 Multi-Status: inspect the per-event result codes.
            # NOTE(Sundar): If Nova returns per-event code of 422, that
            # is due to a race condition where Nova has not associated
            # the instance with a host yet. See
            # https://bugs.launchpad.net/nova/+bug/1855752
            events = [ev for ev in response.json()['events']]
            event_codes = {ev['code'] for ev in events}
            if len(event_codes) == 1:  # all events have same event code
                if event_codes == {422}:
                    # Benign race: ignore and let Nova catch up.
                    LOG.info('Ignoring Nova notification error that the '
                             'instance %s is not yet associated with a host.',
                             events[0]['server_uuid'])
                else:
                    msg = _('Unexpected event code %(code)s '
                            'for instance %(inst)s')
                    msg = msg % {'code': event_codes.pop(),
                                 'inst': events[0]["server_uuid"]}
                    raise exception.InvalidAPIResponse(
                        service='Nova', api=url[1:], msg=msg)
            else:
                # Mixed per-event codes are unexpected; fail loudly.
                msg = _('All event responses are expected to '
                        'have the same event code. Instance: %(inst)s')
                msg = msg % {'inst': events[0]['server_uuid']}
                raise exception.InvalidAPIResponse(
                    service='Nova', api=url[1:], msg=msg)
        else:
            # Unexpected return code from Nova
            msg = _('Failed to send events %(ev)s: HTTP %(code)s: %(txt)s')
            msg = msg % {'ev': events,
                         'code': response.status_code,
                         'txt': response.text}
            raise exception.InvalidAPIResponse(
                service='Nova', api=url[1:], msg=msg)

    def notify_binding(self, instance_uuid, arq_bind_statuses):
        """Notify Nova that ARQ bindings are resolved for a given instance.

        :param instance_uuid: UUID of the instance whose ARQs are resolved
        :param arq_bind_statuses: List of (arq_uuid, arq_bind_status) tuples
        :returns: None
        """
        events = self._get_acc_changed_events(instance_uuid, arq_bind_statuses)
        self._send_events(events)
| StarcoderdataPython |
1676935 | <reponame>anconaesselmann/LiveUnit<filename>templates/general/functions.py<gh_stars>0
"""
@author <NAME>
"""
from os import sys, path
sys.path.append(path.abspath(path.join(__file__, "..", "..", "..", "classes_and_tests")))
try:
from src.Std import Std
from src.MirroredDirectory import MirroredDirectory
except ImportError:
from .src.Std import Std
from .src.MirroredDirectory import MirroredDirectory
class FunctionCollection(object):
    # NOTE(review): the entire class body below is ONE triple-quoted string,
    # i.e. the methods are commented-out dead code kept as the class
    # docstring. The class currently exposes no callable behavior -- confirm
    # whether it should be removed or the methods restored.
    """@staticmethod
    def getSettingNameValuePair(settings):
        if not isinstance(settings, dict): # I could check for string, but I would break x-compatibility between python 2 and 3
            settings = eval(settings)
        for key, value in Std.getIterItems(settings):
            if value is not None:
                return key, value
        return None, None

    @staticmethod
    def get_doc_block_tag(args):
        settings = eval(args["settings"])
        tagName, tagValue = FunctionCollection.getSettingNameValuePair(settings)
        if tagValue is not None:
            result = "@" + tagName + " " + tagValue
        else:
            result = None
        return result

    @staticmethod
    def get_class_name(args):
        result = MirroredDirectory(args["dir"]).getFile()
        return result
    """
152166 | <gh_stars>0
import cv2
import numpy as np
import matplotlib.pylab as plt

# Demo: train an OpenCV SVM on two synthetic 2-D clusters, save/reload the
# model, and classify one new random point.

# Generate 25 x 2 random integers in the 0~158 range ---①
a = np.random.randint(0,158,(25,2))
# Generate 25 x 2 random integers in the 98~255 range ---②
b = np.random.randint(98, 255,(25,2))
# Stack a and b into 50 x 2 training samples ---③
trainData = np.vstack((a, b)).astype(np.float32)
# Create an array of 50 zeros (the labels) ---④
responses = np.zeros((50,1), np.int32)
# Set entries 25..50 to 1 ---⑤
responses[25:] = 1
# Training data labelled 0 is plotted as red triangles ---⑥
red = trainData[responses.ravel()==0]
plt.scatter(red[:,0],red[:,1],80,'r','^')
# Training data labelled 1 is plotted as blue squares ---⑦
blue = trainData[responses.ravel()==1]
plt.scatter(blue[:,0],blue[:,1],80,'b','s')
# A new random sample in 0~255, plotted as a green circle ---⑧
newcomer = np.random.randint(0,255,(1,2)).astype(np.float32)
plt.scatter(newcomer[:,0],newcomer[:,1],80,'g','o')

# Create the SVM object and auto-train it ---⑨
svm = cv2.ml.SVM_create()
svm.trainAuto(trainData, cv2.ml.ROW_SAMPLE, responses)
# Save the model as svm_random.xml ---⑩
svm.save('./svm_random.xml')
# Load the saved model back ---⑪
svm2 = cv2.ml.SVM_load('./svm_random.xml')
# Predict the class of the new random sample ---⑫
ret, results = svm2.predict(newcomer)
# Display the result ---⑬
plt.annotate('red' if results[0]==0 else 'blue', xy=newcomer[0], xytext=(newcomer[0]+1))
print("return:%s, results:%s"%(ret, results))
plt.show()
1605303 | <filename>src/exceptions.py
class BaseError(Exception):
    """
    Base error for all application errors.
    """
    # Default HTTP status code reported when this error reaches the API layer.
    STATUS_CODE: int = 500
class InvalidRequestError(BaseError):
    """
    An error for any invalid API request.
    """
    # 400 Bad Request overrides the 500 default from BaseError.
    STATUS_CODE: int = 400
| StarcoderdataPython |
84696 | <filename>readchar/__init__.py<gh_stars>0
from .readchar import readchar, readkey
from . import key

# BUG FIX: __all__ must contain the *names* of public objects as strings;
# listing the objects themselves breaks `from readchar import *` consumers
# and tooling that expects strings.
__all__ = ["readchar", "readkey", "key"]
__version__ = '2.0.2'
| StarcoderdataPython |
15567 | <gh_stars>0
#!/bin/env python3
"""Handling events as tickets
The goal here is, provided a maintenance event, create an event if not a
duplicate. To determine if not duplicate, use some combination of values to
form a key. Methods to delete, update, and otherwise transform the ticket
should be available
A base class, Ticket, is provided to do some boiler plate things and enforce a
consistent interface.
"""
from textwrap import dedent
from jira import JIRA
class Ticket(object):
    """Base class for a ticket

    Provides standard methods for retrieving duplicates, creating an event,
    and deleting. Implementation details are self-contained to each subclass
    while the interface stays the same.

    Attributes:
        event (XMaintNoteEvent)
        account (str)
        impact (str)
        maintenance_id (str)
        object_id (str)
        provider (str)
        key (str): String that can try to be used to be unique among
            maintenances
        title (str): Generated title that may be used as a ticket title
        body (str): Generated body that may be used as a ticket description
        ticket: Optional to add by subclass, instance of ticket in the
            ticket system
    """

    def __init__(self, event, **kwargs):
        """Initialize common fields and delegate to ``_post_init``.

        Event is the only required input; any kwargs are forwarded to
        ``self._post_init`` so each ticketing system can configure itself
        without overloading ``__init__``.

        A key is created from the provider, account, and maintenance-id of
        the event; how subclasses use it for de-duplication is up to them.

        Args:
            event (XMaintNoteEvent): Maintenance Event
        """
        self.event = event
        self.account = event['X-MAINTNOTE-ACCOUNT']
        self.impact = event['X-MAINTNOTE-IMPACT']
        self.maintenance_id = event['X-MAINTNOTE-MAINTENANCE-ID']
        self.object_id = event['X-MAINTNOTE-OBJECT-ID']
        self.provider = event['X-MAINTNOTE-PROVIDER']
        self.ticket = None

        factors = [
            self.provider,
            self.account,
            self.maintenance_id,
        ]
        self.key = '{}:{}:{}'.format(*factors)
        self.title = '{provider} {impact} Maintenance for {account}'.format(
            provider=self.provider,
            impact=self.impact,
            account=self.account,
        )
        body = '''
        {provider} is having a maintenance of {impact}. Affected account number
        is {account}.

        Start time: {start_time}
        End time: {end_time}
        Impact: {impact}
        Account: {account}
        '''.format(
            provider=self.provider,
            impact=self.impact,
            account=self.account,
            start_time=str(event['DTSTART'].dt),
            end_time=str(event['DTEND'].dt),
        )
        self.body = dedent(body)
        self._post_init(**kwargs)

    def _post_init(self, **kwargs):
        # Hook for subclasses; the base class needs no extra setup.
        pass

    # BUG FIX (all three stubs): ``raise NotImplemented(...)`` raised a
    # TypeError (NotImplemented is a sentinel value, not callable/raisable);
    # NotImplementedError is the correct exception for abstract stubs.
    def create(self):
        """Overload to create a ticket in the system"""
        raise NotImplementedError('Subclass must overload this method')

    def close(self):
        """Overload to close a ticket in the system"""
        raise NotImplementedError('Subclass must overload this method')

    def exists(self):
        """Overload to determine if this event exists in ticket form already"""
        raise NotImplementedError('Subclass must overload this method')
class JiraTicket(Ticket):
    """Ticket driver for JIRA

    Supports adding a list of watchers to maintenance issues created, a
    custom finishing transition for when calling close, and custom issue
    types.

    Priorities are mapped according to the impact status of the maintenance.
    A preferred mapping can be provided; otherwise it defaults to the
    vanilla JIRA install names, eg:

    >>> {
        'NO-IMPACT': {'name': 'Low'},
        'REDUCED-REDUNDANCY': {'name': 'Medium'},
        'DEGRADED': {'name': 'High'},
        'OUTAGE': {'name': 'Highest'},
    }

    Example:
        >>> tkt = JiraTicket(
            event,
            url='http://localhost',
            username='admin',
            password='secret',
            watchers=['noc'],
        )
        >>> tkt.exists()
        False
        >>> tkt.create()
        True
        >>> tkt.exists()
        True
        >>> tkt.ticket
        <JIRA Issue: key=u'MAINT-14', id=u'10013'>
    """

    def _post_init(
        self,
        url='http://localhost:8080',
        username=None,
        # BUG FIX: the default had been mangled to the bare placeholder
        # ``<PASSWORD>`` (a syntax error); None restores the documented
        # "anonymous unless both credentials are given" behavior.
        password=None,
        project='MAINT',
        issuetype='Task',
        finished_transition='Done',
        watchers=None,
        pri_mapping=None,
    ):
        """Initialize the Jira client and driver settings.

        If username or password aren't provided, actions are attempted as
        anonymous.

        Args:
            url (str): URL to jira server. MUST have the URL scheme (http://)
            username (str): Username (if applicable)
            password (str): Password (if applicable)
            project (str): JIRA project handle
            issuetype (str): Issue type to file these issues as
            watchers (list): Usernames to add as watchers to the maints
            finished_transition (str): Transition to move the issue into
                when calling the ``.close`` method. Default: Done
            pri_mapping (dict): Map of maintenance impact name to JIRA
                priority dict, eg {'NO-IMPACT': {'name': 'Low'}}
        """
        # If either part of the credential tuple is missing, fall back to
        # anonymous access.
        credentials = (username, password)
        basic_auth = credentials if all(credentials) else None
        if not watchers:
            watchers = []
        if not pri_mapping:
            pri_mapping = {
                'NO-IMPACT': {'name': 'Low'},
                'REDUCED-REDUNDANCY': {'name': 'Medium'},
                'DEGRADED': {'name': 'High'},
                'OUTAGE': {'name': 'Highest'},
            }

        self.jira = JIRA(url, basic_auth=basic_auth)
        self.project = project
        self.issuetype = issuetype
        self.finished_transition = finished_transition
        self.watchers = watchers
        self.pri_mapping = pri_mapping

    def exists(self):
        """Return bool for whether a maintenance issue exists for this event

        Searches issues by the unique label (``self.key``); when found, the
        first match is cached on ``self.ticket``.

        Improvements: currently not handling the case where multiple issues
        are returned, which may hint that the key isn't unique enough or the
        label was added to other issues manually.

        Returns:
            exists (bool)
        """
        existing = self.jira.search_issues('labels = {}'.format(self.key))
        if existing:
            self.ticket = existing[0]
        return bool(existing)

    def create(self):
        """Create an issue for the event unless a duplicate already exists.

        Watchers configured at init time are added to the new issue.

        Returns:
            success (bool): False when a duplicate exists.
        """
        if self.exists():
            return False
        options = {
            'project': self.project,
            'summary': self.title,
            'labels': [self.key],
            'description': self.body,
            'issuetype': {'name': self.issuetype},
            'priority': self.pri_mapping[self.impact],
        }
        new_issue = self.jira.create_issue(fields=options)
        self.ticket = new_issue
        for watcher in self.watchers:
            self._add_watcher(new_issue, watcher)
        return True

    def close(self):
        """Move the event's issue into the configured finished transition.

        Returns:
            success (bool): False when no issue exists for this event.

        Raises:
            ValueError: when the configured transition is not available
                for the issue.
        """
        if not self.exists():
            return False
        tkt = self.ticket
        # Find the id of the configured transition among those currently
        # available for the issue.
        transitions = self.jira.transitions(tkt)
        transition_ids = [
            t['id'] for t in transitions
            if t['name'] == self.finished_transition
        ]
        if not transition_ids:
            raise ValueError(
                'Transition "{}" not found'.format(self.finished_transition)
            )
        self.jira.transition_issue(tkt, transition_ids[0])
        # BUG FIX: the docstring promises a bool, but the original returned
        # None on the success path.
        return True

    def _add_watcher(self, issue, watcher):
        """Add watcher to issue"""
        self.jira.add_watcher(issue, watcher)
| StarcoderdataPython |
90268 | <reponame>kamilazdybal/PCAfold
import unittest
import numpy as np
from PCAfold import preprocess
from PCAfold import reduction
from PCAfold import analysis
class Preprocess(unittest.TestCase):
    """Construction tests for ``preprocess.KernelDensity``."""

    def test_preprocess__KernelDensity__allowed_calls(self):
        # Synthetic data: 100 observations x 20 variables.
        X = np.random.rand(100,20)

        # Conditioning on a single column, a slice of columns, the first
        # column, and the full matrix should all construct without raising.
        # (try/except + assertTrue(False) is the file's failure idiom.)
        try:
            kerneld = preprocess.KernelDensity(X, X[:,1])
        except Exception:
            self.assertTrue(False)

        try:
            kerneld = preprocess.KernelDensity(X, X[:,4:9])
        except Exception:
            self.assertTrue(False)

        try:
            kerneld = preprocess.KernelDensity(X, X[:,0])
        except Exception:
            self.assertTrue(False)

        try:
            kerneld = preprocess.KernelDensity(X, X)
        except Exception:
            self.assertTrue(False)

        # The fitted object must expose its result attributes.
        try:
            kerneld.X_weighted
            kerneld.weights
        except Exception:
            self.assertTrue(False)

# ------------------------------------------------------------------------------

    def test_preprocess__KernelDensity__not_allowed_calls(self):
        X = np.random.rand(100,20)
        kerneld = preprocess.KernelDensity(X, X[:,1])

        # Result attributes are read-only.
        with self.assertRaises(AttributeError):
            kerneld.X_weighted = 1

        with self.assertRaises(AttributeError):
            kerneld.weights = 1

        # Conditioning variable must have the same number of observations
        # as X; shorter slices are rejected.
        with self.assertRaises(ValueError):
            kerneld = preprocess.KernelDensity(X, X[20:30,1])

        with self.assertRaises(ValueError):
            kerneld = preprocess.KernelDensity(X, X[20:30,:])

# ------------------------------------------------------------------------------
| StarcoderdataPython |
105940 | <reponame>mstim/ms_deisotope<gh_stars>10-100
import unittest
from ms_deisotope import processor
from ms_deisotope.averagine import glycopeptide, peptide
from ms_deisotope.scoring import PenalizedMSDeconVFitter, MSDeconVFitter
from ms_deisotope.test.common import datafile
class TestScanProcessor(unittest.TestCase):
    """Integration tests for ``processor.ScanProcessor`` over small bundled
    mzML fixtures."""

    # Fixture paths resolved through the package's test-data helper.
    mzml_path = datafile("three_test_scans.mzML")
    missing_charge_mzml = datafile("has_missing_charge_state_info.mzML")
    complex_compressed_mzml = datafile("20150710_3um_AGP_001_29_30.mzML.gz")

    def test_processor(self):
        # Smoke test: every bunch must carry a precursor and products.
        proc = processor.ScanProcessor(self.mzml_path, ms1_deconvolution_args={
            "averagine": glycopeptide,
            "scorer": PenalizedMSDeconVFitter(5., 2.)
        })
        for scan_bunch in iter(proc):
            self.assertIsNotNone(scan_bunch)
            self.assertIsNotNone(scan_bunch.precursor)
            self.assertIsNotNone(scan_bunch.products)

    def test_averaging_processor(self):
        # Same as above but with MS1 scan averaging enabled (window of 1).
        proc = processor.ScanProcessor(self.mzml_path, ms1_deconvolution_args={
            "averagine": glycopeptide,
            "scorer": PenalizedMSDeconVFitter(5., 2.)
        }, ms1_averaging=1)
        for scan_bunch in iter(proc):
            self.assertIsNotNone(scan_bunch)
            self.assertIsNotNone(scan_bunch.precursor)
            self.assertIsNotNone(scan_bunch.products)

    def test_missing_charge_processing(self):
        # When charge state info is missing, defaulted precursors must not
        # correspond to any real MS1 peak near the reported m/z.
        proc = processor.ScanProcessor(self.missing_charge_mzml, ms1_deconvolution_args={
            "averagine": glycopeptide,
            "scorer": PenalizedMSDeconVFitter(5., 2.)
        })
        for scan_bunch in iter(proc):
            self.assertIsNotNone(scan_bunch)
            self.assertIsNotNone(scan_bunch.precursor)
            self.assertIsNotNone(scan_bunch.products)
            for product in scan_bunch.products:
                if product.precursor_information.defaulted:
                    candidates = scan_bunch.precursor.peak_set.between(
                        product.precursor_information.mz - 1, product.precursor_information.mz + 1)
                    assert len(candidates) == 0

    def test_complex_processor(self):
        # Full pipeline on a realistic gzipped file: check that every
        # precursor is resolved (not defaulted) and that the recalculated
        # neutral masses match known-good values to 2 decimals.
        proc = processor.ScanProcessor(self.complex_compressed_mzml, ms1_deconvolution_args={
            "averagine": glycopeptide,
            "scorer": PenalizedMSDeconVFitter(20., 2.),
            "truncate_after": 0.95
        }, msn_deconvolution_args={
            "averagine": peptide,
            "scorer": MSDeconVFitter(10.),
            "truncate_after": 0.8
        })
        bunch = next(proc)
        assert len(bunch.products) == 5
        for product in bunch.products:
            assert not product.precursor_information.defaulted
        # Expected extracted neutral masses per product scan id.
        recalculated_precursors = {
            'scanId=1740086': 4640.00074242012,
            'scanId=1740149': 4786.05878475792,
            'scanId=1740226': 4640.007868154431,
            'scanId=1740344': 4348.90894554512,
            'scanId=1740492': 5005.1329902247435
        }
        for product in bunch.products:
            mass = product.precursor_information.extracted_neutral_mass
            self.assertAlmostEqual(mass, recalculated_precursors[product.id], 2)

        # Re-seeking into the middle of the run must produce the same
        # quality of results from that point on.
        proc.start_from_scan("scanId=1760847")
        bunch = next(proc)
        recalculated_precursors = {
            'scanId=1761168': 4640.01972225792,
            'scanId=1761235': 4640.019285920238,
            'scanId=1761325': 4786.07251976387,
            'scanId=1761523': 4696.016295197582,
            'scanId=1761804': 986.58798612896
        }
        for product in bunch.products:
            mass = product.precursor_information.extracted_neutral_mass
            self.assertAlmostEqual(mass, recalculated_precursors[product.id], 2)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1761306 | <reponame>magikid/UASTrafficLightMk2
#!/usr/bin/python3
print('script start')
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
seq = ['red', 'yellow', 'green', 'yellow',]
pins = {
2 : {'name' : 'red', 'state' : GPIO.LOW},
3 : {'name' : 'green', 'state' : GPIO.LOW},
4 : {'name' : 'yellow', 'state' : GPIO.LOW}
}
for pin in pins:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, GPIO.LOW)
#color = input('Color? ')
def light_off():
    """Drive every configured lamp pin low, extinguishing all lights."""
    for channel in pins:
        GPIO.output(channel, GPIO.LOW)
def light_on(color):
    """Blink the lamp whose name matches *color*: on for ~0.1 s, then off."""
    for i in pins:
        if pins[i]['name'] == color:
            GPIO.output(i, GPIO.HIGH)
            time.sleep(.1)
            GPIO.output(i, GPIO.LOW)
def sequence(seq):
    """Blink each lamp named in *seq*, in order, forever (never returns)."""
    while True:
        for i in seq:
            light_on(i)
#light_on(color)
# sequence() loops forever, so the closing print is unreachable in normal runs.
sequence(seq)
print('script end')
| StarcoderdataPython |
4835393 | from __future__ import absolute_import
from __future__ import division
import unittest
import pandas as pd
from collections import Counter
from mock import MagicMock
import pyspark.sql
from pyspark.sql import Row
from affirm.model_interpretation.shparkley.spark_shapley import (
compute_shapley_score,
compute_shapley_for_sample,
ShparkleyModel
)
class TestShparkleyModel(ShparkleyModel):
    """Concrete ShparkleyModel adapter around the mocked model used in the tests."""
    def get_required_features(self):
        # Delegate straight to the wrapped (mock) model.
        return self._model.get_required_features()
    def predict(self, feature_matrix):
        """Score a list/dict feature matrix by converting it to a DataFrame first."""
        pd_df = pd.DataFrame.from_dict(feature_matrix)
        preds = self._model.predict(pd_df)
        return preds
def model_predict_side_effect_function(df):
    """Fake model: score = 3*f1 + 5*f2; also writes a 'score' column into *df*."""
    scores = df['f1'] * 3 + df['f2'] * 5
    df['score'] = scores
    return df['score'].values
class SparkShapleyTest(unittest.TestCase):
    """Tests for Shparkley's Spark-based Shapley value computation.

    Note: uses ``assertEqual``/``assertAlmostEqual`` — the ``assertEquals`` /
    ``assertAlmostEquals`` aliases were deprecated and removed in Python 3.12.
    """

    def setUp(self):
        # Mock model scoring 3*f1 + 5*f2, wrapped in the ShparkleyModel adapter.
        self.m_model = MagicMock()
        self.m_model.get_required_features.return_value = ['f1', 'f2']
        self.m_model.predict.side_effect = model_predict_side_effect_function
        self.row1 = {
            'f1': 0.01,
            'f2': 0.05,
        }
        self.row2 = {
            'f1': 0.2,
            'f2': 0.5,
        }
        self.row3 = {
            'f1': 1.0,
            'f2': 0.5,
        }
        # Sample whose prediction we explain.
        self.row_investigate = {
            'f1': 7.0,
            'f2': 6.5,
        }
        self.m_shparkley_model = TestShparkleyModel(self.m_model)
        # Single-threaded local Spark session keeps results deterministic.
        builder = pyspark.sql.SparkSession.builder.master('local[1]')
        self.spark = builder.appName('unittest').getOrCreate()

    def tearDown(self):
        if self.spark is not None:
            self.spark.stop()

    def test_compute_shapley_for_sample(self):
        """Unweighted Shapley values over the three background rows."""
        dataset = self.spark.createDataFrame([self.row1, self.row2, self.row3])
        shapley_scores = compute_shapley_for_sample(
            df=dataset,
            model=self.m_shparkley_model,
            row_to_investigate=Row(**self.row_investigate)
        )
        sorted_shapley_scores = sorted([(k, v) for k, v in shapley_scores.items()])
        self.assertEqual(sorted_shapley_scores, [('f1', 19.79), ('f2', 30.75)])

    def test_compute_shapley_for_sample_weighted(self):
        """Weighting row1 double shifts both Shapley values upward."""
        # Add weights: [2, 1, 1]
        self.row1.update({'weight': 2})
        for row in (self.row2, self.row3):
            row.update({'weight': 1})
        dataset = self.spark.createDataFrame([self.row1, self.row2, self.row3])
        shapley_scores = compute_shapley_for_sample(
            df=dataset,
            model=self.m_shparkley_model,
            row_to_investigate=Row(**self.row_investigate),
            weight_col_name='weight',
        )
        sorted_shapley_scores = sorted([(k, v) for k, v in shapley_scores.items()])
        # row 1, which has small feature vals (=> smaller prediction) weighted more heavily means that compared to
        # baseline, the relatively large features in the sample of interest (=> bigger prediction => more different
        # from row 1) will increase both shapley values to the below compared to unweighted (19.79, 30.75)
        self.assertEqual(sorted_shapley_scores, [('f1', 20.085), ('f2', 31.125)])

    def test_compute_shapley_score(self):
        """Per-partition marginal contributions for each feature and sample."""
        row_samples = [Row(**self.row1), Row(**self.row2), Row(**self.row3)]
        scores = compute_shapley_score(
            partition_index=1,
            rand_rows=row_samples,
            row_to_investigate=Row(**self.row_investigate),
            model=self.m_shparkley_model
        )
        # (feature, marginal contribution, weight) triples; default weight is 1.0.
        expected_result = [
            ('f1', 18.0, 1.0),
            ('f1', 20.4, 1.0),
            ('f1', 20.97, 1.0),
            ('f2', 30.0, 1.0),
            ('f2', 30.0, 1.0),
            ('f2', 32.25, 1.0)
        ]
        self.assertEqual(sorted(list(scores)), expected_result)

    def test_compute_shapley_score_weighted(self):
        """Weights must pass through unchanged while contributions stay equal."""
        # Get unweighted result for reference
        row_samples = [Row(**self.row1), Row(**self.row2), Row(**self.row3)]
        unweighted_result = compute_shapley_score(
            partition_index=1,
            rand_rows=row_samples,
            row_to_investigate=Row(**self.row_investigate),
            model=self.m_shparkley_model
        )
        unweighted_result = sorted(list(unweighted_result))
        # Add weights: [2, 1, 1]
        self.row1.update({'weight': 2})
        for row in (self.row2, self.row3):
            row.update({'weight': 1})
        row_samples__weighted_first_double = [Row(**self.row1), Row(**self.row2), Row(**self.row3)]
        weighted_first_double_result = compute_shapley_score(
            partition_index=1,
            rand_rows=row_samples__weighted_first_double,
            row_to_investigate=Row(**self.row_investigate),
            model=self.m_shparkley_model,
            weight_col_name='weight',
        )
        weights_found = Counter()
        for (ft_name_uw, ft_val_uw, ft_val_weight_uw), (ft_name_double, ft_val_double, ft_val_weight) in zip(
                unweighted_result,
                sorted(list(weighted_first_double_result))
        ):
            self.assertEqual(ft_name_uw, ft_name_double)
            self.assertEqual(ft_val_uw, ft_val_double)
            weights_found[ft_val_weight] += 1
        # Four (two rows times two features) of \phi_j^ms of weight 1, two (one rwo times two features) of
        # weight 2
        self.assertDictEqual(weights_found, {1.0: 4, 2.0: 2})

    def test_efficiency_property(self):
        # The Shapley value must satisfy the Efficiency property
        # The feature contributions must add up to the difference of prediction for x and the average.
        dataset = self.spark.createDataFrame([self.row1, self.row2, self.row3])
        shapley_scores = compute_shapley_for_sample(
            df=dataset,
            model=self.m_shparkley_model,
            row_to_investigate=Row(**self.row_investigate)
        )
        total_shapley_value = sum([v for _, v in shapley_scores.items()])
        predicted_value_for_row = model_predict_side_effect_function(
            pd.DataFrame.from_dict([self.row_investigate])
        )
        rows = [self.row1, self.row2, self.row3]
        scores = model_predict_side_effect_function(pd.DataFrame.from_dict(rows))
        mean_prediction_on_dataset = sum(scores)/len(rows)
        self.assertAlmostEqual(
            first=total_shapley_value,
            second=predicted_value_for_row - mean_prediction_on_dataset,
            delta=0.01
        )
| StarcoderdataPython |
1775451 | from setuptools import setup, find_packages
from setuptools.extension import Extension
from glob import glob
import numpy
from Cython.Distutils import build_ext
# Cython extension modules; sources ship as .pyx and are compiled by build_ext.
ext_modules = [
    Extension("taggd.core.demultiplex_core_functions", ["taggd/core/demultiplex_core_functions.pyx"]),
    Extension("taggd.core.demultiplex_sub_functions", ["taggd/core/demultiplex_sub_functions.pyx"]),
    Extension("taggd.core.demultiplex_search_functions", ["taggd/core/demultiplex_search_functions.pyx"]),
    Extension("taggd.core.match", ["taggd/core/match.pyx"]),
    Extension("taggd.core.match_type", ["taggd/core/match_type.pyx"]),
    Extension("taggd.core.statistics", ["taggd/core/statistics.pyx"]),
    Extension("taggd.misc.distance_metrics", ["taggd/misc/distance_metrics.pyx"]),
    Extension("taggd.misc.kmer_utils", ["taggd/misc/kmer_utils.pyx"]),
    Extension("taggd.io.fastq_utils", ["taggd/io/fastq_utils.pyx"]),
    Extension("taggd.io.barcode_utils", ["taggd/io/barcode_utils.pyx"]),
    Extension("taggd.io.record", ["taggd/io/record.pyx"]),
    Extension("taggd.io.sam_record", ["taggd/io/sam_record.pyx"]),
    Extension("taggd.io.fasta_record", ["taggd/io/fasta_record.pyx"]),
    Extension("taggd.io.fastq_record", ["taggd/io/fastq_record.pyx"]),
    Extension("taggd.io.reads_reader_writer", ["taggd/io/reads_reader_writer.pyx"])
]
# Use Cython's build_ext so .pyx files are cythonized at build time.
cmdclass = { 'build_ext': build_ext }
setup(
    name = "taggd",
    version = '0.3.6',
    author = '<NAME>, <NAME>',
    author_email = '<EMAIL>, <EMAIL>',
    license = 'Open BSD',
    description = 'Bioinformatics genetic barcode demultiplexing',
    url = 'https://github.com/SpatialTranscriptomicsResearch/taggd',
    # NOTE(review): download_url points at 0.3.2 while version is 0.3.6 — confirm.
    download_url = 'https://github.com/SpatialTranscriptomicsResearch/taggd/0.3.2',
    scripts = glob("scripts/*.py"),
    packages = ['taggd', 'taggd.core', 'taggd.io', 'taggd.misc'],
    # Ship the Cython sources and generated C alongside the wheels/sdists.
    package_data = {'': ['*.pyx', '*.pxd', '*.h', '*.c'], },
    setup_requires = ["cython"],
    install_requires = [
        'setuptools',
        'pysam',
        'numpy',
    ],
    test_suite = 'tests',
    cmdclass = cmdclass,
    ext_modules = ext_modules,
    include_dirs = [numpy.get_include(), '.'],
    keywords = ['bioinformatics', 'demultiplexing']
) | StarcoderdataPython |
4809195 | <filename>Comments/models.py<gh_stars>1-10
from django.contrib.auth import get_user_model
from django.db import models
# Resolve the active user model once at import time (supports custom user models).
User = get_user_model()
class Comments(models.Model):
    """A threaded comment that may reply to another comment or to a review."""
    # Parent comment when this is a nested reply; null for top-level comments.
    parent_comment = models.ForeignKey('self', null=True, on_delete=models.CASCADE)
    # Review this comment was left under, if any.
    parent_reply = models.ForeignKey('Reviews.Review', null=True, on_delete=models.CASCADE)
    # Replies to this comment; self M2M is symmetrical by default — TODO confirm intent.
    comments_replies = models.ManyToManyField('self')
    # Comment author; null allows keeping comments after user deletion is off (CASCADE) — verify.
    author = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
    body = models.TextField(max_length=200)
| StarcoderdataPython |
4833155 | <reponame>hpharmsen/pysimplicate<gh_stars>0
import datetime
from beautiful_date import *
def contract(self, filter=None):
    """Fetch all HRM contracts.

    :param filter: optional query filter dict; defaults to no filtering.
    :return: result of the composed '/hrm/contract' call.
    """
    # Avoid the shared-mutable-default pitfall of ``filter={}``.
    filter = {} if filter is None else filter
    url = '/hrm/contract'
    fields = {'employee_name': 'employee.name'}
    result = self.composed_call(url, fields, filter)
    return result
def employee(self, filter=None):
    """Fetch all HRM employees with a flattened set of person fields.

    :param filter: optional query filter dict; defaults to no filtering.
    :return: result of the composed '/hrm/employee' call.
    """
    # Avoid the shared-mutable-default pitfall of ``filter={}``.
    filter = {} if filter is None else filter
    url = '/hrm/employee'
    fields = {
        'full_name': 'person.full_name',
        'name': 'name',
        'status': 'status.label',
        'work_email': 'work_email',
        'function': 'function',
    }
    result = self.composed_call(url, fields, filter)
    return result
# Time tables
def timetable(self, filter=None):
    """Fetch employee timetables.

    :param filter: optional query filter dict; defaults to no filtering.
    :return: result of the composed '/hrm/timetable' call.
    """
    # Avoid the shared-mutable-default pitfall of ``filter={}``.
    filter = {} if filter is None else filter
    url = '/hrm/timetable'
    fields = {
        'employee_name': 'employee.name',
        'start_date': 'start_date',
        # NOTE(review): 'end_date' maps to 'start_date' in the original —
        # looks like a copy/paste slip, but kept as-is to preserve behavior.
        'end_date': 'start_date',
    }
    return self.composed_call(url, fields, filter)
def timetable_simple(self, employee_name):
    """Return the employee's timetable as 7 (even_week, odd_week) hour pairs.

    # in: employee name
    # out: [(8, 8), (8, 8), (8, 8), (8, 8), (8, 8), (0, 0), (0, 0)]
    """
    # [-1] takes the last timetable entry — presumably the most recent; verify ordering.
    table = self.timetable({'employee_name': employee_name})[-1]
    res = [(table['even_week'][f'day_{i}']['hours'], table['odd_week'][f'day_{i}']['hours']) for i in range(1, 8)]
    return res
def timetable_today(self, employee_name):
    """Hours the employee is scheduled to work today.

    Picks the even- or odd-week slot by ISO week-number parity.
    NOTE(review): even ISO weeks select index 0 (the even_week column) —
    confirm that mapping matches the HR data.
    """
    day_of_week = datetime.datetime.today().weekday()  # weekday is 0 for Monday
    week_number = datetime.datetime.today().isocalendar()[1]
    index = week_number % 2
    table = self.timetable_simple(employee_name)
    res = table[day_of_week][index]
    return res
def leave(self, filter=None):
    """Fetch all leave records (not leave *types* — see :func:`leavetype`).

    Returns a list of {id, employee, start_date, end_date, year, description}.

    :param filter: optional query filter dict; defaults to no filtering.
    """
    # Avoid the shared-mutable-default pitfall of ``filter={}``.
    filter = {} if filter is None else filter
    url = '/hrm/leave'
    fields = {
        'employee_name': 'employee.name',
        'year': 'year',
        'leavetype_label': 'leavetype.label',
        'affects_balance': 'leavetype.affects_balance',
        'start_date': 'start_date',
        # NOTE(review): 'end_date' maps to 'start_date' in the original —
        # kept as-is to preserve behavior; confirm before changing.
        'end_date': 'start_date',
    }
    return self.composed_call(url, fields, filter)
def leave_simple(self, filter={}):
    """Flatten leave records for display.

    # Returns list of {employee_name, start_date, days, description}
    NOTE(review): returns ``False`` (not ``[]``) when nothing is found —
    callers must treat both as empty; confirm this is intentional.
    """
    # Calls the module-level ``leave`` helper with the same bound object.
    leaves = leave(self, filter)
    if not leaves:
        return False
    res = []
    for l in leaves:
        # Skip records without a start date.
        if not l.get('start_date'):
            continue
        start = _to_date(l['start_date'])
        # Convert hours to whole-day units (8-hour workday assumed — verify).
        days = l['hours'] / 8
        res += [
            {
                'id': l['id'],
                'name': l['employee']['name'],
                # start[:] relies on BeautifulDate slicing — presumably a copy; confirm.
                'start_day': start[:],
                'days': days,
                'description': l['description'],
            }
        ]
    return res
def _to_date(date: str):
    """Parse a 'YYYY-MM-DD[ ...]' string into a BeautifulDate."""
    year, month, day = date.split()[0].split('-')
    return BeautifulDate(int(year), int(month), int(day))
def leavetype(self, show_blocked=False):
    """Fetch all leave types.

    Returns a list of {id, label, blocked, color, affects_balance}; blocked
    types are filtered out unless *show_blocked* is true.
    """
    url = '/hrm/leavetype'
    if not show_blocked:
        url = self.add_url_param(url, 'blocked', 'False')
    return self.call(url)
def leavebalance(self, filter=None):
    """Fetch leave balances per employee.

    Returns a list of {employee (id, name), balance (in hours), year,
    leave_type (id, label)}.

    :param filter: optional query filter dict; defaults to no filtering.
    """
    # Avoid the shared-mutable-default pitfall of ``filter={}``.
    filter = {} if filter is None else filter
    url = '/hrm/leavebalance'
    fields = {
        'employee_name': 'employee.name',
        'year': 'year',
        'leavetype_label': 'leavetype.label',
        'affects_balance': 'leavetype.affects_balance',
    }
    return self.composed_call(url, fields, filter)
def absence(self, filter=None):
    """Fetch absence records per employee.

    :param filter: optional query filter dict; defaults to no filtering.
    :return: result of the composed '/hrm/absence' call.
    """
    # Avoid the shared-mutable-default pitfall of ``filter={}``.
    filter = {} if filter is None else filter
    url = '/hrm/absence'
    fields = {
        'employee_name': 'employee.name',
        'year': 'year',
        'start_date': 'start_date',
        'end_date': 'end_date',
    }
    return self.composed_call(url, fields, filter)
| StarcoderdataPython |
3345774 | <reponame>sharonwoo/prophet
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os.path
import platform
import sys
import os
from pkg_resources import (
normalize_path,
working_set,
add_activation_listener,
require,
)
from setuptools import setup, find_packages
from setuptools.command.build_py import build_py
from setuptools.command.develop import develop
from setuptools.command.test import test as test_command
from typing import List
# Platform flavour used to pick the matching precompiled Stan sources.
PLATFORM = 'unix'
if platform.platform().startswith('Win'):
    PLATFORM = 'win'
# Source Stan model directory (per platform) and its destination inside the package.
MODEL_DIR = os.path.join('stan', PLATFORM)
MODEL_TARGET_DIR = os.path.join('fbprophet', 'stan_model')
def get_backends_from_env() -> List[str]:
    """Return Stan backend names from $STAN_BACKEND (comma-separated); default PYSTAN."""
    # Local import: fbprophet is only importable once its dependencies are installed.
    from fbprophet.models import StanBackendEnum
    return os.environ.get("STAN_BACKEND", StanBackendEnum.PYSTAN.name).split(",")
def build_models(target_dir):
    """Compile the Stan model for every configured backend into *target_dir*."""
    from fbprophet.models import StanBackendEnum
    for backend in get_backends_from_env():
        StanBackendEnum.get_backend_class(backend).build_model(target_dir, MODEL_DIR)
class BuildPyCommand(build_py):
    """Custom build command to pre-compile Stan models."""
    def run(self):
        # Compile models into the build tree unless this is a dry run.
        if not self.dry_run:
            target_dir = os.path.join(self.build_lib, MODEL_TARGET_DIR)
            self.mkpath(target_dir)
            build_models(target_dir)
        build_py.run(self)
class DevelopCommand(develop):
    """Custom develop command to pre-compile Stan models in-place."""
    def run(self):
        # For editable installs, compile models directly into the source tree.
        if not self.dry_run:
            target_dir = os.path.join(self.setup_path, MODEL_TARGET_DIR)
            self.mkpath(target_dir)
            build_models(target_dir)
        develop.run(self)
class TestCommand(test_command):
    """setuptools ``test`` command that runs against the build directory."""
    # Extra CLI options on top of setuptools' stock test command.
    user_options = [
        ('test-module=', 'm', "Run 'test_suite' in specified module"),
        ('test-suite=', 's',
         "Run single test, case or suite (e.g. 'module.test_suite')"),
        ('test-runner=', 'r', "Test runner to use"),
        ('test-slow', 'w', "Test slow suites (default off)"),
    ]
    def initialize_options(self):
        super(TestCommand, self).initialize_options()
        self.test_slow = False
    def finalize_options(self):
        super(TestCommand, self).finalize_options()
        # NOTE(review): test_slow is initialised to False above, so this None
        # check can never fire — confirm initialize_options should set None.
        if self.test_slow is None:
            self.test_slow = getattr(self.distribution, 'test_slow', False)
    """We must run tests on the build directory, not source."""
    def with_project_on_sys_path(self, func):
        # Ensure metadata is up-to-date
        self.reinitialize_command('build_py', inplace=0)
        self.run_command('build_py')
        bpy_cmd = self.get_finalized_command("build_py")
        build_path = normalize_path(bpy_cmd.build_lib)
        # Build extensions
        self.reinitialize_command('egg_info', egg_base=build_path)
        self.run_command('egg_info')
        self.reinitialize_command('build_ext', inplace=0)
        self.run_command('build_ext')
        ei_cmd = self.get_finalized_command("egg_info")
        # Swap sys.path/sys.modules to the built egg, run the tests, then restore.
        old_path = sys.path[:]
        old_modules = sys.modules.copy()
        try:
            sys.path.insert(0, normalize_path(ei_cmd.egg_base))
            working_set.__init__()
            add_activation_listener(lambda dist: dist.activate())
            require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
            func()
        finally:
            sys.path[:] = old_path
            sys.modules.clear()
            sys.modules.update(old_modules)
            working_set.__init__()
# Long description and dependency pins come straight from the repository files.
with open('README.md', 'r', encoding='utf-8') as f:
    long_description = f.read()
with open('requirements.txt', 'r') as f:
    install_requires = f.read().splitlines()
setup(
    name='fbprophet',
    version='0.7.1',
    description='Automatic Forecasting Procedure',
    url='https://facebook.github.io/prophet/',
    author='<NAME> <<EMAIL>>, <NAME> <<EMAIL>>',
    author_email='<EMAIL>',
    license='MIT',
    packages=find_packages(),
    setup_requires=[
    ],
    install_requires=install_requires,
    python_requires='>=3',
    zip_safe=False,
    include_package_data=True,
    # Hook the custom commands so Stan models are compiled at build/develop time.
    cmdclass={
        'build_py': BuildPyCommand,
        'develop': DevelopCommand,
        'test': TestCommand,
    },
    test_suite='fbprophet.tests',
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
    ],
    long_description=long_description,
    long_description_content_type='text/markdown',
)
| StarcoderdataPython |
1670340 | <filename>tests/models/test_base_operations.py<gh_stars>0
import time
import pytest
from libdev.gen import generate
from . import Base, Attribute
from consys.errors import ErrorWrong
class ObjectModel(Base):
    """Fixture model: plain, defaulted, callable-default, list and dict attributes."""
    # Backend collection name — TODO confirm semantics of _name in consys Base.
    _name = 'tests'
    meta = Attribute(types=str)
    delta = Attribute(types=str, default='')
    # Callable default: derived from the instance's delta at creation time.
    extra = Attribute(types=str, default=lambda instance: f'u{instance.delta}o')
    multi = Attribute(types=list, default=[])
    sodzu = Attribute(types=dict)
class ObjectModel2(Base):
    """Fixture model with a caller-supplied string id (used by test_wrong_ids)."""
    _name = 'tests2'
    id = Attribute(types=str)
def test_load():
    """Save an instance and load it back by id; all fields must round-trip."""
    # (sic: 'recieved' spelling is used consistently throughout this suite)
    now = time.time()
    instance = ObjectModel(
        title='test_load',
        user=2,
        status=3,
        meta='onigiri',
        delta='hinkali',
        extra='ramen',
    )
    instance.save()
    recieved = ObjectModel.get(ids=instance.id)
    assert isinstance(recieved, ObjectModel)
    assert recieved.id == instance.id
    assert recieved.title == 'test_load'
    assert instance.created < now + 1
    assert instance.updated < now + 1
    assert instance.user == 2
    assert instance.status == 3
    assert recieved.meta == 'onigiri'
    assert recieved.delta == 'hinkali'
    assert recieved.extra == 'ramen'
def test_load_zero():
    """Falsy id arguments (0, []) must raise ErrorWrong."""
    with pytest.raises(ErrorWrong):
        ObjectModel.get(0)
    with pytest.raises(ErrorWrong):
        ObjectModel.get([])
def test_load_unknown():
    """Filtering on a value no instance has yields an empty list."""
    assert ObjectModel.get(delta='ola') == []
def test_load_complex_fields():
    """Nested dict fields are queryable with dotted (Mongo-style) paths."""
    instance = ObjectModel(
        sodzu={
            'sake': [
                'onigiri',
                'hinkali',
                'ramen',
            ]
        },
    )
    instance.save()
    instances = ObjectModel.get(extra={
        'sodzu.sake': 'ramen',
    })
    instances = {
        ins.id: ins for ins in instances
    }
    assert instances[instance.id]
def test_system_fields():
    """Internal bookkeeping fields must never leak through json()."""
    instance = ObjectModel(
        title='test_system_fields',
        meta='onigiri',
    )
    instance.save()
    instance = ObjectModel.get(ids=instance.id, fields={})
    assert instance.json().get('_loaded_values') is None
    instances = ObjectModel.get(search='')
    assert all(
        instance.json().get('_loaded_values') is None
        for instance in instances
    )
def test_list():
    """get() with a tuple of ids returns a list of instances.

    NOTE(review): asserts assume the result is ordered newest-first
    (recieved[1] is instance1) — confirm the backend's ordering guarantee.
    """
    now = time.time()
    instance1 = ObjectModel(
        title='test_list',
        user=2,
        status=3,
        meta='onigiri',
        delta='hinkali',
        extra='ramen',
    )
    instance1.save()
    instance2 = ObjectModel()
    instance2.save()
    recieved = ObjectModel.get(ids=(
        instance1.id,
        instance2.id,
    ))
    assert isinstance(recieved, list)
    with recieved[1] as recieved1:
        assert isinstance(recieved1, ObjectModel)
        assert recieved1.id == instance1.id
        assert recieved1.title == 'test_list'
        assert recieved1.created < now + 1
        assert recieved1.updated < now + 1
        assert recieved1.user == 2
        assert recieved1.status == 3
        assert recieved1.meta == 'onigiri'
        assert recieved1.delta == 'hinkali'
        assert recieved1.extra == 'ramen'
    with recieved[0] as recieved2:
        assert isinstance(recieved2, ObjectModel)
        assert recieved2.id == instance2.id
        assert recieved2.created < now + 1
        assert recieved2.updated < now + 1
def test_update():
    """Load, mutate, save: changed fields persist, untouched fields survive."""
    instance = ObjectModel(
        title='test_create',
        delta='hinkali',
    )
    instance.save()
    assert instance.title == 'test_create'
    assert instance.meta is None
    assert instance.delta == 'hinkali'
    instance_id = instance.id
    instance = ObjectModel.get(ids=instance_id)
    instance.title = 'test_update'
    instance.meta = 'onigiri'
    instance.save()
    assert instance_id == instance.id
    instance = ObjectModel.get(ids=instance.id)
    assert instance.title == 'test_update'
    assert instance.meta == 'onigiri'
    assert instance.delta == 'hinkali'
def test_update_empty():
    """Setting a field to None must not overwrite the stored value."""
    instance = ObjectModel(
        title='test_create',
        meta='onigiri',
    )
    instance.save()
    assert instance.title == 'test_create'
    assert instance.meta == 'onigiri'
    instance_id = instance.id
    instance = ObjectModel.get(ids=instance_id)
    instance.title = None
    instance.save()
    assert instance_id == instance.id
    instance = ObjectModel.get(ids=instance.id)
    assert instance.title == 'test_create'
    assert instance.meta == 'onigiri'
def test_update_resave():
    """Mutating and re-saving the same in-memory instance keeps its id."""
    instance = ObjectModel(
        title='test_create',
        delta='hinkali'
    )
    instance.save()
    instance_id = instance.id
    instance.title = 'test_update'
    instance.meta = 'onigiri'
    instance.save()
    assert instance_id == instance.id
    instance = ObjectModel.get(ids=instance.id)
    assert instance.title == 'test_update'
    assert instance.meta == 'onigiri'
    assert instance.delta == 'hinkali'
def test_rm():
    """Removed instances are no longer loadable."""
    instance = ObjectModel()
    instance.save()
    instance.rm()
    with pytest.raises(ErrorWrong):
        ObjectModel.get(ids=instance.id)
def test_rm_nondb():
    """Removing a never-saved instance raises ErrorWrong."""
    instance = ObjectModel()
    with pytest.raises(ErrorWrong):
        instance.rm()
def test_rm_attr():
    """Deleting an attribute on a loaded instance clears it in storage."""
    instance = ObjectModel(
        meta='onigiri',
        delta='hinkali',
    )
    instance.save()
    instance = ObjectModel.get(ids=instance.id)
    del instance.meta
    instance.delta = 'hacapuri'
    instance.save()
    instance = ObjectModel.get(ids=instance.id)
    assert instance.meta is None
    assert instance.delta == 'hacapuri'
def test_rm_attr_resave():
    """Attribute deletion also works on the same in-memory instance before re-save."""
    instance = ObjectModel(
        title='test_attr_resave',
        meta='onigiri',
        delta='hinkali',
    )
    instance.save()
    del instance.meta
    instance.delta = 'hacapuri'
    instance.save()
    instance = ObjectModel.get(ids=instance.id)
    assert instance.title == 'test_attr_resave'
    assert instance.meta is None
    assert instance.delta == 'hacapuri'
def test_reload():
    """reload() refreshes a stale copy with the latest stored values."""
    instance = ObjectModel(
        delta='hinkali',
    )
    instance.save()
    recieved1 = ObjectModel.get(ids=instance.id)
    recieved2 = ObjectModel.get(ids=instance.id)
    assert recieved1._specified_fields is None
    assert recieved2._specified_fields is None
    recieved1.delta = 'hacapuri'
    recieved1.save()
    assert recieved1.delta == 'hacapuri'
    assert recieved2.delta == 'hinkali'
    recieved2.reload()
    assert recieved2._specified_fields is None
    assert recieved2.id == recieved1.id == instance.id
    assert recieved2.delta == 'hacapuri'
    recieved1.reload()
    assert recieved1._specified_fields is None
    # NOTE(review): 'recieved1.id == recieved1.id' compares a value with itself —
    # probably intended to be recieved2.id; the assertion is vacuous as written.
    assert recieved1.id == recieved1.id == instance.id
    assert recieved1.delta == 'hacapuri'
def test_resave():
    """Saving without changes must not bump the 'updated' timestamp."""
    instance = ObjectModel(
        delta='hinkali',
    )
    instance.save()
    updated = instance.updated
    instance.save()
    assert instance.updated == updated
def test_complex():
    """complex() applies a post-processing handler to the projected fields."""
    instance = ObjectModel(
        meta='onigiri',
        delta='hinkali',
    )
    instance.save()
    def handler(obj):
        obj['teta'] = obj['meta'].upper()
        return obj
    recieved = ObjectModel.complex(
        ids=instance.id,
        fields={'id', 'meta'},
        handler=handler,
    )
    assert recieved == {
        'id': instance.id,
        'meta': 'onigiri',
        'teta': 'ONIGIRI',
    }
def test_to_default():
    """Fields set back to their default are unset in storage ($exists: False)."""
    instance = ObjectModel(
        delta='hinkali',
        multi=[1, 2, 3],
    )
    instance.save()
    instances = ObjectModel.get(delta={'$exists': False}, fields={'id'})
    assert instance.id not in [i.id for i in instances]
    instances = ObjectModel.get(multi={'$exists': False}, fields={'id'})
    assert instance.id not in [i.id for i in instances]
    # Assign the declared default ('' for delta) — the stored field disappears.
    instance.delta = ''
    instance.save()
    assert instance.delta == ''
    assert instance.multi == [1, 2, 3]
    instances = ObjectModel.get(delta={'$exists': False}, fields={'id'})
    assert instance.id in [i.id for i in instances]
    instance = ObjectModel.get(instance.id)
    assert instance.delta == ''
    assert instance.multi == [1, 2, 3]
    instance.multi = []
    instance.save()
    assert instance.multi == []
    instances = ObjectModel.get(multi={'$exists': False}, fields={'id'})
    assert instance.id in [i.id for i in instances]
def test_wrong_ids():
    """Unknown ids in a bulk get are silently skipped (no error raised)."""
    instance1 = ObjectModel2(id=generate())
    instance2 = ObjectModel2(id=generate())
    instance3 = ObjectModel2(id=generate())
    instance1.save()
    instance3.save()
    instances = ObjectModel2.get({
        instance1.id,
        instance2.id,
        instance3.id,
    })
    assert len(instances) == 2
def test_get_with_none_field():
    """Filtering on field=None must not exclude instances with a value set."""
    instance = ObjectModel(meta='hinkali')
    instance.save()
    instance = ObjectModel.get(
        ids=instance.id,
        meta=None,
    )
    assert instance
| StarcoderdataPython |
90358 | <gh_stars>10-100
"""
CausalDAG
=========
CausalDAG is a Python package for the creation, manipulation, and learning of Causal DAGs.
Simple Example
--------------
>>> from causaldag import rand, partial_correlation_suffstat, partial_correlation_test, MemoizedCI_Tester, gsp
>>> import numpy as np
>>> np.random.seed(12312)
>>> nnodes = 5
>>> nodes = set(range(nnodes))
>>> dag = rand.directed_erdos(nnodes, .5)
>>> gdag = rand.rand_weights(dag)
>>> samples = gdag.sample(100)
>>> suffstat = partial_correlation_suffstat(samples)
>>> ci_tester = MemoizedCI_Tester(partial_correlation_test, suffstat, alpha=1e-3)
>>> est_dag = gsp(nodes, ci_tester)
>>> dag.shd_skeleton(est_dag)
3
License
-------
Released under the 3-Clause BSD license::
Copyright (C) 2018
<NAME> <<EMAIL>>
"""
# from .loaders import *
# from . import utils
from conditional_independence import *
from graphical_models import *
import graphical_models.rand as rand
from graphical_model_learning import *
| StarcoderdataPython |
12423 | # --------------------------------------------------------
# (c) Copyright 2014 by <NAME>.
# Licensed under BSD 3-clause licence.
# --------------------------------------------------------
import unittest
from pymonad.Maybe import Maybe, Just, First, Last, _Nothing, Nothing
from pymonad.Reader import curry
from pymonad.test.MonadTester import *
from pymonad.test.MonoidTester import *
class TestJustFunctor(unittest.TestCase, MonadTester):
    """Functor laws for Just."""
    def __init__(self, x):
        # *x* is the test method name forwarded to unittest.TestCase — TODO confirm.
        super(TestJustFunctor, self).__init__(x)
        self.setClassUnderTest(Just)
    def testFunctorLaws(self):
        self.given(8)
        self.ensure_first_functor_law_holds()
        self.ensure_second_functor_law_holds()
class TestNothingFunctor(unittest.TestCase, MonadTester):
    """Functor laws for Nothing."""
    def __init__(self, x):
        super(TestNothingFunctor, self).__init__(x)
        self.setClassUnderTest(_Nothing)
    def testFunctorLaws(self):
        self.given(None)
        self.ensure_first_functor_law_holds()
        self.ensure_second_functor_law_holds()
class TestJustApplicative(unittest.TestCase, MonadTester):
    """Applicative laws for Just."""
    def __init__(self, x):
        super(TestJustApplicative, self).__init__(x)
        self.setClassUnderTest(Just)
    def testApplicativeLaws(self):
        self.given(8)
        self.ensure_first_applicative_law_holds()
        self.ensure_second_applicative_law_holds()
        self.ensure_third_applicative_law_holds()
        self.ensure_fourth_applicative_law_holds()
        self.ensure_fifth_applicative_law_holds()
class TestNothingApplicative(unittest.TestCase, MonadTester):
    """Applicative laws for Nothing."""
    def __init__(self, x):
        super(TestNothingApplicative, self).__init__(x)
        self.setClassUnderTest(_Nothing)
    def testApplicativeLaws(self):
        self.given(None)
        self.ensure_first_applicative_law_holds()
        self.ensure_second_applicative_law_holds()
        self.ensure_third_applicative_law_holds()
        self.ensure_fourth_applicative_law_holds()
        self.ensure_fifth_applicative_law_holds()
class TestJustMonad(unittest.TestCase, MonadTester):
    """Monad laws for Just, using two simple Kleisli arrows f and g."""
    def __init__(self, x):
        super(TestJustMonad, self).__init__(x)
        self.setClassUnderTest(Just)
    def monad_function_f(self, x):
        return Just(x + 10)
    def monad_function_g(self, x):
        return Just(x * 5)
    def testMonadLaws(self):
        self.given(8)
        self.ensure_first_monad_law_holds()
        self.ensure_second_monad_law_holds()
        self.ensure_third_monad_law_holds()
class TestNothingMonad(unittest.TestCase, MonadTester):
    """Monad laws for Nothing (bind must short-circuit)."""
    def __init__(self, x):
        super(TestNothingMonad, self).__init__(x)
        self.setClassUnderTest(_Nothing)
    def monad_function_f(self, x):
        return Just(x + 10)
    def monad_function_g(self, x):
        return Just(x * 5)
    def testMonadLaws(self):
        self.given(None)
        self.ensure_first_monad_law_holds()
        self.ensure_second_monad_law_holds()
        self.ensure_third_monad_law_holds()
class TestMaybeEquality(unittest.TestCase, MonadTester):
    """Equality semantics of Just/Nothing, including cross-monad comparisons."""
    # Reader is presumably re-exported via the MonadTester star import — verify.
    def testEqualityOfIdenticalTypes(self):
        self.givenMonads(Just(8), Just(8))
        self.ensureMonadsAreEqual()
    def testInequalityOfIdenticalTypes(self):
        self.givenMonads(Just(8), Just(9))
        self.ensureMonadsAreNotEqual()
    def testInequalityOfJustAndNothing(self):
        self.givenMonads(Just(8), Nothing)
        self.ensureMonadsAreNotEqual()
    def testMonadComparisonExceptionWithJust(self):
        self.givenMonads(Just(8), Reader(8))
        self.ensureComparisonRaisesException()
    def testMonadComparisonExceptionWithNothing(self):
        self.givenMonads(Nothing, Reader(8))
        self.ensureComparisonRaisesException()
class TestMaybeMonoid(unittest.TestCase, MonoidTester):
    """Monoid laws for Maybe: mzero is Nothing, mplus sums the wrapped values."""
    def test_mzero(self):
        self.givenMonoid(Maybe)
        self.get_mzero()
        self.ensure_mzero_is(Nothing)
    def test_right_identity(self):
        self.givenMonoid(Just(9))
        self.ensure_monoid_plus_zero_equals(Just(9))
    def test_left_identity(self):
        self.givenMonoid(Just(9))
        self.ensure_zero_plus_monoid_equals(Just(9))
    def test_associativity(self):
        self.givenMonoids(Just(1), Just(2), Just(3))
        self.ensure_associativity()
    def test_mplus_with_two_just_values(self):
        self.givenMonoids(Just(1), Just(2))
        self.ensure_mconcat_equals(Just(3))
    def test_mplus_with_one_just_and_one_nothing(self):
        self.givenMonoids(Just(1), Nothing)
        self.ensure_mconcat_equals(Just(1))
class TestFirstMonoid(unittest.TestCase, MonoidTester):
    """Monoid laws for First: mplus keeps the first Just value."""
    def test_mzero(self):
        self.givenMonoid(First)
        self.get_mzero()
        self.ensure_mzero_is(First(Nothing))
    def test_right_identity(self):
        self.givenMonoid(First(Just(9)))
        self.ensure_monoid_plus_zero_equals(First(Just(9)))
    def test_left_identity(self):
        self.givenMonoid(First(Just(9)))
        self.ensure_zero_plus_monoid_equals(First(Just(9)))
    def test_associativity(self):
        self.givenMonoids(First(Just(1)), First(Just(2)), First(Just(3)))
        self.ensure_associativity()
    def test_mplus_with_two_just_values(self):
        self.givenMonoids(First(Just(1)), First(Just(2)))
        self.ensure_mconcat_equals(First(Just(1)))
    def test_mplus_with_just_and_nothing(self):
        self.givenMonoids(First(Just(1)), Nothing)
        self.ensure_mconcat_equals(First(Just(1)))
    def test_mplus_with_nothing_and_just(self):
        self.givenMonoids(Nothing, First(Just(1)))
        self.ensure_mconcat_equals(First(Just(1)))
class TestLastMonoid(unittest.TestCase, MonoidTester):
    """Monoid laws for Last: mplus keeps the last Just value."""
    def test_mzero(self):
        self.givenMonoid(Last)
        self.get_mzero()
        self.ensure_mzero_is(Last(Nothing))
    def test_right_identity(self):
        self.givenMonoid(Last(Just(9)))
        self.ensure_monoid_plus_zero_equals(Last(Just(9)))
    def test_left_identity(self):
        self.givenMonoid(Last(Just(9)))
        self.ensure_zero_plus_monoid_equals(Last(Just(9)))
    def test_associativity(self):
        self.givenMonoids(Last(Just(1)), Last(Just(2)), Last(Just(3)))
        self.ensure_associativity()
    def test_mplus_with_two_just_values(self):
        self.givenMonoids(Last(Just(1)), Last(Just(2)))
        self.ensure_mconcat_equals(Last(Just(2)))
    def test_mplus_with_just_and_nothing(self):
        self.givenMonoids(Last(Just(1)), Nothing)
        self.ensure_mconcat_equals(Last(Just(1)))
    def test_mplus_with_nothing_and_just(self):
        self.givenMonoids(Nothing, Last(Just(1)))
        self.ensure_mconcat_equals(Last(Just(1)))
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3235708 | from app import ResponseBuilder, logger
from static import strings
def handle_help_intent(request):
    """Respond to Alexa's built-in HelpIntent.

    Introduces the skill's capabilities and attaches a reprompt so the
    session stays open for a follow-up utterance.

    :type request: AlexaRequest
    :return: response (serialised to JSON by the framework) with a reprompt
    """
    logger.info("Help requested by the user")
    message = strings.INTENT_HELP
    reprompt_message = strings.INTENT_GENERAL_REPROMPT
    return ResponseBuilder.create_response(request, message=message)\
        .with_reprompt(reprompt_message)
| StarcoderdataPython |
64466 | <gh_stars>10-100
import os
import numpy as np
import pickle
from datetime import date
# Date stamp shared by every experiment directory created during this run.
today = date.today()


class save_info(object):
    """Manages the on-disk layout for one training experiment.

    Every artifact lives under
    ``<assets_dir>/learned_models/<exp_name>/<today>-exp-<exp_num>-<env_name>``.
    Call :meth:`create_all_paths` once before any of the save/dump methods.
    """

    def __init__(self, assets_dir, exp_num, exp_name, env_name):
        self.assets_dir = assets_dir
        self.experiment_num = 'exp-{}'.format(exp_num)
        # common path (relative to assets_dir) shared by all artifacts
        self.saving_path = 'learned_models/{}/{}-{}-{}'.format(exp_name, today, self.experiment_num, env_name)

    def create_all_paths(self):
        """Create all the paths to save learned models/data.

        BUGFIX: the original makedirs guard referenced the *unimported*
        ``errno`` module, so losing a directory-creation race raised
        NameError; ``exist_ok=True`` is the race-free equivalent.
        """
        # model saving path
        self.model_saving_path = os.path.join(self.assets_dir, self.saving_path, 'model.p')
        os.makedirs(os.path.dirname(self.model_saving_path), exist_ok=True)
        # intermediate model saving path
        self.intermediate_model_saving_path = os.path.join(self.assets_dir, self.saving_path, 'intermediate_model/')
        os.makedirs(os.path.dirname(self.intermediate_model_saving_path), exist_ok=True)
        # avg reward saving path
        self.avg_reward_saving_path = os.path.join(self.assets_dir, self.saving_path, 'avg_reward.p')
        # num of steps saving path
        self.num_of_steps_saving_path = os.path.join(self.assets_dir, self.saving_path, 'num_of_steps.p')
        # num of episodes saving path
        self.num_of_episodes_saving_path = os.path.join(self.assets_dir, self.saving_path, 'num_of_episodes.p')
        # total num of episodes saving path
        self.total_num_of_episodes_saving_path = os.path.join(self.assets_dir, self.saving_path, 'total_num_of_episodes.p')
        # total num of steps saving path
        self.total_num_of_steps_saving_path = os.path.join(self.assets_dir, self.saving_path, 'total_num_of_steps.p')
        # rewards std saving path
        self.rewards_std_saving_path = os.path.join(self.assets_dir, self.saving_path, 'rewards_std.p')
        # environment average reward saving path
        self.env_avg_reward_saving_path = os.path.join(self.assets_dir, self.saving_path, 'env_avg_reward.p')
        # value-loss saving path
        self.true_v_loss_list_saving_path = os.path.join(self.assets_dir, self.saving_path, 'true_v_loss_list.p')
        # decayed value-loss saving path (currently never written by dump_lists)
        self.decayed_v_loss_list_saving_path = os.path.join(self.assets_dir, self.saving_path, 'decayed_v_loss_list.p')
        # policy-loss saving path
        self.p_loss_list_saving_path = os.path.join(self.assets_dir, self.saving_path, 'p_loss_list.p')
        # evaluation average reward
        self.eval_avg_R_saving_path = os.path.join(self.assets_dir, self.saving_path, 'eval_avg_R.p')
        # evaluation average reward std
        self.eval_avg_R_std_saving_path = os.path.join(self.assets_dir, self.saving_path, 'eval_avg_R_std.p')

    def _dump(self, obj, path):
        # Pickle *obj* to *path*.  The stored paths already start with
        # assets_dir; the original re-joined assets_dir onto them (a no-op
        # only when assets_dir is absolute, a double prefix when relative)
        # and leaked every file handle it opened.
        with open(path, 'wb') as handle:
            pickle.dump(obj, handle)

    def dump_lists(self, avg_reward, num_of_steps, num_of_episodes, total_num_episodes, total_num_steps, rewards_std, env_avg_reward, v_loss_list, p_loss_list, eval_avg_reward, eval_avg_reward_std):
        """Pickle every training statistic to its pre-computed path."""
        self._dump(avg_reward, self.avg_reward_saving_path)
        self._dump(num_of_steps, self.num_of_steps_saving_path)
        self._dump(num_of_episodes, self.num_of_episodes_saving_path)
        self._dump(total_num_episodes, self.total_num_of_episodes_saving_path)
        self._dump(total_num_steps, self.total_num_of_steps_saving_path)
        self._dump(rewards_std, self.rewards_std_saving_path)
        self._dump(env_avg_reward, self.env_avg_reward_saving_path)
        self._dump(v_loss_list, self.true_v_loss_list_saving_path)
        self._dump(p_loss_list, self.p_loss_list_saving_path)
        self._dump(eval_avg_reward, self.eval_avg_R_saving_path)
        self._dump(eval_avg_reward_std, self.eval_avg_R_std_saving_path)

    def save_models(self, policy_net, value_net, running_state):
        """Pickle the final (policy, value, normalizer) triple."""
        self._dump((policy_net, value_net, running_state), self.model_saving_path)

    def save_intermediate_models(self, policy_net, value_net, running_state, i_iter):
        """Pickle a checkpoint for training iteration *i_iter* (name is 1-based)."""
        checkpoint = os.path.join(self.intermediate_model_saving_path, 'model_iter_{}.p'.format(i_iter + 1))
        self._dump((policy_net, value_net, running_state), checkpoint)
| StarcoderdataPython |
def enable_dropout(model):
    """Force every Dropout-like submodule of *model* into training mode,
    leaving all other submodules' modes untouched."""
    dropout_layers = (module for module in model.modules()
                      if type(module).__name__.startswith('Dropout'))
    for layer in dropout_layers:
        layer.train()
def disable_dropout(model):
    """Force every Dropout-like submodule of *model* into eval mode,
    leaving all other submodules' modes untouched."""
    dropout_layers = (module for module in model.modules()
                      if type(module).__name__.startswith('Dropout'))
    for layer in dropout_layers:
        layer.eval()
3232812 | <reponame>szymanskir/Face-Recognition
# -*- coding: utf-8 -*-
import click
import logging
import pandas as pd
from sklearn.decomposition import PCA
def create_pca_model(number_of_components, train_faces):
    """Fit a PCA decomposition (eigenfaces) on the training faces.

    random_state is pinned so repeated runs produce identical components.
    """
    model = PCA(n_components=number_of_components, random_state=0)
    model.fit(train_faces)
    return model
@click.command()
@click.argument('input_train_faces', type=click.Path(exists=True))
@click.argument('input_test_faces', type=click.Path(exists=True))
@click.argument('output_train_faces', type=click.Path())
@click.argument('output_test_faces', type=click.Path())
@click.argument('number_of_components', type=click.INT)
def main(input_train_faces,
         input_test_faces,
         output_train_faces,
         output_test_faces,
         number_of_components):
    """
    Extracts features using the PCA decomposition algorithm (eigenfaces)
    """
    logger = logging.getLogger(__name__)
    logger.info(f'Extracting features using the PCA algorithm(n={number_of_components})...')
    # Fit PCA on the training faces only, then project both splits onto it.
    train_faces = pd.read_csv(input_train_faces)
    test_faces = pd.read_csv(input_test_faces)
    pca = create_pca_model(number_of_components, train_faces)
    projected = {output_train_faces: pd.DataFrame(pca.transform(train_faces)),
                 output_test_faces: pd.DataFrame(pca.transform(test_faces))}
    logging.info("Finished extracting features")
    logging.info("Saving extracted features")
    for destination, features in projected.items():
        features.to_csv(destination, index=False)
if __name__ == '__main__':
    # Configure root logging before invoking the click entry point.
    logging.basicConfig(level=logging.INFO)
    main()
| StarcoderdataPython |
1738012 | <gh_stars>0
""" A library of projection bases for Underdamped Langevin Inference. """
import numpy as np
def basis_selector(basis, data):
    """Map a basis-description dict to its basis-function factory.

    :param basis: dict with a 'type' key plus the type-specific parameters
        used below (order, widths, kernels, ...).
    :param data: data object; only ``data.d`` (the spatial dimension) is read.
    :returns: tuple ``(funcs, is_interacting)`` where *funcs* is the callable
        produced by the matching factory and *is_interacting* flags
        multi-particle bases.
    :raises KeyError: if ``basis['type']`` is not recognized.

    BUGFIX: the original mixed two separate ``if`` chains, so the supported
    type 'particles_pair_interaction' fell through to the trailing ``else``
    and raised ``KeyError``.  A single ``elif`` chain fixes that.
    """
    is_interacting = False
    btype = basis['type']
    if btype == 'polynomial':
        funcs = polynomial_basis(data.d, basis['order'])
    elif btype == 'Fourier':
        funcs = Fourier_basis(data.d, basis['width_x'], basis['width_v'], basis['center'], basis['order'])
    elif btype == 'fourierX_polyV':
        funcs = fourierX_polyV_basis(data.d, basis['center_x'], basis['width_x'], basis['order_x'], basis['order_v'])
    elif btype == 'particles_pair_interaction':
        is_interacting = True
        funcs = particles_pair_interaction(data.d, basis['kernels'])
    elif btype == 'particles_pair_alignment_interaction':
        is_interacting = True
        funcs = particles_pair_alignment_interaction(basis['kernels'])
    elif btype == 'self_propelled_particles':
        is_interacting = True
        funcs = self_propelled_particles_basis(basis['order'], basis['kernels'])
    elif btype == 'aligning_self_propelled_particles':
        is_interacting = True
        funcs = aligning_self_propelled_particles_basis(basis['order'], basis['kernels'])
    elif btype == 'aligning_flock':
        is_interacting = True
        funcs = aligning_flock_basis(basis['order'], basis['kernels'], data.d, basis['translation_invariant'])
    else:
        raise KeyError("Unknown basis type.")
    return funcs, is_interacting
def polynomial_basis(dim, order):
    """Polynomial basis over the phase point (X, V), up to total degree *order*.

    Columns are ordered by degree, then by the loosely-decreasing index
    sequences used to generate them; the first column is the constant 1.
    """
    # Multi-indices are grown degree by degree.  Each index list is kept
    # loosely ordered (c1 >= c2 >= c3 ...) so every monomial appears once.
    by_degree = [np.array([[]], dtype=int)]
    for _ in range(order):
        extended = []
        for index_list in by_degree[-1]:
            limit = (index_list[-1] + 1) if index_list.shape[0] > 0 else 2 * dim
            for axis in range(limit):
                extended.append(list(index_list) + [axis])
        by_degree.append(np.array(extended, dtype=int))
    multi_indices = [c for degree in by_degree for c in degree]

    def Polynomial(X, V):
        rows = []
        for sample in range(X.shape[0]):
            # Concatenated phase-space coordinates (x..., v...) of one sample.
            phase_point = np.concatenate((np.asarray(X[sample, :], dtype=float),
                                          np.asarray(V[sample, :], dtype=float)))
            rows.append([np.prod(phase_point[c]) for c in multi_indices])
        return np.array(rows)
    return Polynomial
def velocity_polynomial_basis(dim, order):
    """Polynomial basis in the velocity components only (X is ignored)."""
    # Same loosely-ordered multi-index generation as polynomial_basis, but
    # over the dim velocity axes only.
    by_degree = [np.array([[]], dtype=int)]
    for _ in range(order):
        extended = []
        for index_list in by_degree[-1]:
            limit = (index_list[-1] + 1) if index_list.shape[0] > 0 else dim
            for axis in range(limit):
                extended.append(list(index_list) + [axis])
        by_degree.append(np.array(extended, dtype=int))
    multi_indices = [c for degree in by_degree for c in degree]

    def VelocityPolynomial(X, V):
        # X is accepted only for interface uniformity with the other bases.
        return np.array([[np.prod(v[c]) for c in multi_indices] for v in V])
    return VelocityPolynomial
def polynomial_basis_labels(dim, order):
    """Human-readable labels ('x0.v1^2.' style) for polynomial_basis columns.

    The constant column is labelled "1"; exponent 1 is left implicit.
    """
    # Re-generate the same multi-indices as polynomial_basis so labels line
    # up column-for-column with its output.
    by_degree = [np.array([[]], dtype=int)]
    for _ in range(order):
        extended = []
        for index_list in by_degree[-1]:
            limit = (index_list[-1] + 1) if index_list.shape[0] > 0 else 2 * dim
            for axis in range(limit):
                extended.append(list(index_list) + [axis])
        by_degree.append(np.array(extended, dtype=int))
    multi_indices = [c for degree in by_degree for c in degree]
    exponents = np.array([[list(c).count(i) for i in range(2 * dim)] for c in multi_indices])

    def _factor(prefix, axis, power):
        # One "x3^2."-style factor of a monomial label.
        text = prefix + str(axis)
        if power > 1:
            text += "^" + str(power)
        return text + "."

    labels = []
    for exps in exponents:
        pieces = [_factor("x", i, p) for i, p in enumerate(exps[:dim]) if p > 0]
        pieces += [_factor("v", i, p) for i, p in enumerate(exps[dim:]) if p > 0]
        labels.append("".join(pieces) if pieces else "1")
    return labels
def Fourier_basis(dim, width_x, width_v, center, order):
    """Truncated Fourier basis over (x, v) space.

    Positions are centered on *center* and rescaled by *width_x*; velocities
    are rescaled by *width_v*.  The returned function maps (X, V) arrays of
    shape (N, dim) to an (N, 2*K + 1) design matrix: a constant column plus a
    cos/sin pair for each of the K non-zero multi-indices up to *order*.

    BUGFIX: the rescaled velocities ``Vs`` were computed but never used — the
    raw ``V`` was concatenated into the phase point, so ``width_v`` had no
    effect.  ``Vs[j]`` is now used, mirroring the treatment of positions.
    """
    # Loosely-ordered multi-index generation, as in polynomial_basis; the
    # constant (empty) index is dropped here and re-added as column 0 below.
    coeffs = [np.array([[]], dtype=int)]
    for n in range(order):
        new_coeffs = []
        for c in coeffs[-1]:
            for i in range((c[-1] + 1) if c.shape[0] > 0 else 2 * dim):
                new_coeffs.append(list(c) + [i])
        coeffs.append(np.array(new_coeffs, dtype=int))
    coeffs = [c for degree in coeffs[1:] for c in degree]
    # Convert index lists to exponent vectors over the 2*dim axes.
    coeffs_lowdim = np.array([[list(c).count(i) for i in range(2 * dim)] for c in coeffs])

    def Fourier(X, V):
        Xc = 2 * np.pi * (X - center) / width_x
        Vs = 2 * np.pi * V / width_v
        vals = np.ones((len(Xc), 2 * len(coeffs_lowdim) + 1))
        for j, x in enumerate(Xc):
            xv = np.array(list(x) + list(Vs[j]))
            for i, c in enumerate(coeffs_lowdim):
                vals[j, 2 * i + 1] = np.cos(xv.dot(c))
                vals[j, 2 * i + 2] = np.sin(xv.dot(c))
        return vals
    return Fourier
def fourierX_polyV_basis(dim, center_x, width_x, order_x, order_v):
    # Tensor-product basis: a Fourier expansion in position times a
    # polynomial expansion in velocity.  The outer product of the two
    # per-sample feature rows is flattened into a single row.
    def polyV_basis(dim, order):
        # Polynomial features in the velocity only (same multi-index
        # generation as velocity_polynomial_basis).
        coeffs = [np.array([[]], dtype=int)]
        for n in range(order):
            # Generate the next coefficients:
            new_coeffs = []
            for c in coeffs[-1]:
                # We generate loosely ordered lists of coefficients
                # (c1 >= c2 >= c3 ...) (avoids redundancies):
                for i in range((c[-1] + 1) if c.shape[0] > 0 else dim):
                    new_coeffs.append(list(c) + [i])
            coeffs.append(np.array(new_coeffs, dtype=int))
        # Group all coefficients together
        coeffs = [c for degree in coeffs for c in degree]
        return lambda V: np.array([[np.prod(v[c]) for c in coeffs] for v in V])
    def fourierX_basis(dim, order, center, width):
        # Fourier features in the position only; the constant (empty) index
        # is dropped and re-added as the leading column of ones.
        coeffs = [np.array([[]], dtype=int)]
        for n in range(order):
            # Generate the next coefficients:
            new_coeffs = []
            for c in coeffs[-1]:
                # We generate loosely ordered lists of coefficients
                # (c1 >= c2 >= c3 ...) (avoids redundancies):
                for i in range((c[-1] + 1) if c.shape[0] > 0 else dim):
                    new_coeffs.append(list(c) + [i])
            coeffs.append(np.array(new_coeffs, dtype=int))
        coeffs = [c for degree in coeffs[1:] for c in degree]
        coeffs_lowdim = np.array([[list(c).count(i) for i in range(dim)] for c in coeffs])
        def Fourier(X):
            # Center and rescale positions to radians before the expansion.
            Xc = 2 * np.pi * (X - center) / width
            vals = np.ones((len(Xc), 2 * len(coeffs_lowdim) + 1))
            for j, x in enumerate(Xc):
                for i, c in enumerate(coeffs_lowdim):
                    vals[j, 2 * i + 1] = np.cos(x.dot(c))
                    vals[j, 2 * i + 2] = np.sin(x.dot(c))
            return vals
        return Fourier
    fourierX = fourierX_basis(dim, order_x, center_x, width_x)
    polyV = polyV_basis(dim, order_v)
    def fourierX_polyV(X, V):
        # Per-sample outer product of the two feature rows, flattened so the
        # combined index a = (fourier index, poly index).
        fX = fourierX(X)
        pV = polyV(V)
        return np.reshape(np.einsum('ia,ib->iab', fX, pV), (fX.shape[0], fX.shape[1] * pV.shape[1]))
    return fourierX_polyV
### INTERACTING PARTICLES ###
def particles_pair_interaction(dim, kernels):
    """Radially symmetric vector pair interactions as a sum of kernels.

    For each kernel f, the feature on particle i is
    sum_j f(R_ij) * (X_j - X_i) / R_ij; the kernel index and the spatial
    index are flattened into a single column index.
    """
    def pair_function_spherical(X):
        # X: (Nparticles, dim) array of positions.
        n_particles = X.shape[0]
        # Pairwise separation vectors: separations[i, j] = X[j] - X[i].
        separations = X[np.newaxis, :, :] - X[:, np.newaxis, :]
        distances = np.linalg.norm(separations, axis=2)
        # Unit-vector weights; the i == j diagonal divides 0/0 -> NaN -> 0.
        weights = np.nan_to_num(np.array([f(distances) / distances for f in kernels]))
        features = np.einsum('fij,ijm->ifm', weights, separations)
        return features.reshape((n_particles, dim * len(kernels)))
    return pair_function_spherical
def self_propelled_particles_basis(order_single, kernels):
    """Basis for 2D self-propelled particles (x, y, theta) without alignment:
    single-particle polynomial terms in (x, y, vx, vy), the raw angular
    velocity, self-propulsion along the heading, and radial pair terms."""
    self_propulsion = lambda X, V: np.array([np.cos(X[:, 2]), np.sin(X[:, 2]), -V[:, 2]]).T
    single_particle = polynomial_basis(2, order_single)
    pair_interaction = particles_pair_interaction(2, kernels)

    def basis(X, V):
        columns = list(single_particle(X[:, :2], V[:, :2]).T)
        columns.append(V[:, 2])
        columns.extend(self_propulsion(X, V).T)
        columns.extend(pair_interaction(X[:, :2]).T)
        return np.array(columns).T
    return basis
def particles_pair_alignment_interaction(kernels):
    """Pair interaction for (x, y, theta) particles.

    The spatial components use the raw separation vector; the angular
    component uses sin(theta_j - theta_i).  Kernel weights depend on the
    planar (x, y) distance only.
    """
    def pair_function_alignment(X):
        # X: (Nparticles, 3) array of (x, y, theta).
        n_particles = X.shape[0]
        # differences[i, j] = X[j] - X[i]; angular channel wrapped via sin.
        differences = X[np.newaxis, :, :] - X[:, np.newaxis, :]
        differences[:, :, 2] = np.sin(differences[:, :, 2])
        planar_distances = np.linalg.norm(differences[:, :, :2], axis=2)
        # Kernel / distance weights; the diagonal 0/0 -> NaN -> 0.
        weights = np.nan_to_num(np.array([f(planar_distances) / planar_distances for f in kernels]))
        summed = np.einsum('fij,ijm->ifm', weights, differences)
        return summed.reshape((n_particles, 3 * len(kernels)))
    return pair_function_alignment
def aligning_self_propelled_particles_basis(order_single, kernels):
    """Basis for 2D aligning self-propelled particles: single-particle
    polynomial terms, self-propulsion along the heading, and radial
    pair/alignment terms."""
    self_propulsion = lambda X, V: np.array([np.cos(X[:, 2]), np.sin(X[:, 2]), -V[:, 2]]).T
    single_particle = polynomial_basis(2, order_single)
    pair_alignment = particles_pair_alignment_interaction(kernels)

    def basis(X, V):
        columns = list(single_particle(X[:, :2], V[:, :2]).T)
        columns.extend(self_propulsion(X, V).T)
        columns.extend(pair_alignment(X).T)
        return np.array(columns).T
    return basis
def aligning_flock_basis(order_single, kernels, dim, translation_invariant):
    # Basis for a d-dimensional "flock": single-particle polynomial terms
    # plus kernel-weighted sums of pairwise position and velocity
    # differences.  With translation_invariant set, the single-particle part
    # depends on the velocities only (positions drop out).
    if translation_invariant:
        poly = velocity_polynomial_basis(dim, order_single)
    else:
        poly = polynomial_basis(dim, order_single)
    def pair_align(X, V):
        # X is a Nparticles x dim - shaped array.
        Nparticles = X.shape[0]
        Xij = np.array([[Xj - Xi for j, Xj in enumerate(X)] for i, Xi in enumerate(X)])
        Vij = np.array([[Vj - Vi for j, Vj in enumerate(V)] for i, Vi in enumerate(V)])
        Rij = np.linalg.norm(Xij, axis=2)
        # NOTE: unlike the pair-interaction bases above, the kernel is NOT
        # divided by Rij here, so kernels act as plain distance weights.
        f_Rij = np.nan_to_num(np.array([f(Rij) for f in kernels]))
        # Combine the kernel index f and the spatial index m into a
        # single function index a:
        fX_i = np.einsum('fij,ijm->fim', f_Rij, Xij)
        fV_i = np.einsum('fij,ijm->fim', f_Rij, Vij)
        return np.einsum('fim->ifm', np.array([x for x in fX_i] + [v for v in fV_i])).reshape((Nparticles, (2 * dim) * len(kernels)))
    return lambda X, V: np.array([v for v in poly(X, V).T] + [v for v in pair_align(X, V).T]).T
| StarcoderdataPython |
185264 | <filename>tests/test_pllcalcs.py
from unittest import TestCase
from pll.pll_calcs import *
class TestGeneralFunctions(TestCase):
    """Unit tests for the free helper functions in pll.pll_calcs."""

    def test_interp_linear_1(self):
        """ test the linear interpolator with a value within the x array
        """
        test_var = interp_linear([10, 20], [1, 2], 12)
        self.assertAlmostEqual(1.2, test_var[1])

    def test_interp_linear_2(self):
        """ test the linear interpolator with a value below the x array
        """
        test_var = interp_linear([1, 2, 3], [1, 0, 3], 1.5)
        self.assertAlmostEqual(0.5, test_var[1])

    def test_interp_linear_3(self):
        """ test the linear interpolator with a value above the x array

        BUGFIX: this method was also named test_interp_linear_2, which
        silently shadowed the test above so it never ran.
        """
        test_var = interp_linear([1, 2, 3], [1, 2, 3], 3.5)
        self.assertAlmostEqual(3.5, test_var[1])

    def test_freq_points_per_decade(self):
        """ tests that the get_freq_points_per_decade() function returns
        the correct array
        """
        f_good = list(range(10, 100, 10))
        f_good.extend(range(100, 1000, 100))
        f_good.extend(range(1000, 11000, 1000))
        # BUGFIX: the float conversion result was previously discarded
        # (the list comprehension was not assigned back).
        f_good = [float(i) for i in f_good]
        f_test = get_freq_points_per_decade(10, 10000, 10)
        self.assertEqual(set(f_good), set(f_test))
class Test2ndOrderPassive(TestCase):
    """End-to-end checks of PllSecondOrderPassive via calc_components().

    Each test designs a loop filter for a target phase margin / bandwidth,
    re-simulates the PLL from the resulting component values, and checks
    that the requested quantity is recovered.
    """

    def _design_and_simulate(self):
        # Shared fixture (was duplicated verbatim in both tests): returns
        # (target_pm, target_fc, measured_pm, measured_fc).
        fc = 100e3
        pm = 45.0
        gamma = 1.024
        kphi = 4.69e-3
        kvco = 10e6
        fstart = 1
        fstop = 100e6
        ptsPerDec = 100
        N = 200
        R = 4
        pll = PllSecondOrderPassive(fc,
                                    pm,
                                    kphi,
                                    kvco,
                                    N,
                                    gamma=gamma)
        d_test = pll.calc_components()
        pm_test, fc_test = get_pm_fc_from_actual_filter_components(
            d_test, fstart, fstop, ptsPerDec, kphi, kvco, N, R)
        return pm, fc, pm_test, fc_test

    def test_2nd_order_passive_phase_margin(self):
        """The achieved phase margin matches the design target."""
        pm, fc, pm_test, fc_test = self._design_and_simulate()
        self.assertAlmostEqual(pm, pm_test)

    def test_2nd_order_passive_loop_bandwidth(self):
        """The achieved loop bandwidth matches the design target."""
        pm, fc, pm_test, fc_test = self._design_and_simulate()
        self.assertAlmostEqual(fc, fc_test)
class Test3rdOrderPassive(TestCase):
    """End-to-end checks of PllThirdOrderPassive via calc_components().

    Each test designs a loop filter for a target phase margin / bandwidth,
    re-simulates the PLL from the resulting component values, and checks
    that the requested quantity is recovered.
    """

    def _design_and_simulate(self):
        # Shared fixture (was duplicated verbatim in both tests): returns
        # (target_pm, target_fc, measured_pm, measured_fc).
        fc = 100e3
        pm = 45.0
        kphi = 5e-3
        kvco = 10e6
        N = 200
        fstart = 1
        fstop = 100e6
        ptsPerDec = 100
        R = 1
        pll = PllThirdOrderPassive(fc,
                                   pm,
                                   kphi,
                                   kvco,
                                   N,
                                   gamma=1.024,
                                   t31=0.6)
        d_test = pll.calc_components()
        pm_test, fc_test = get_pm_fc_from_actual_filter_components(
            d_test, fstart, fstop, ptsPerDec, kphi, kvco, N, R)
        return pm, fc, pm_test, fc_test

    def test_3rd_order_passive_phase_margin(self):
        """The achieved phase margin matches the design target."""
        pm, fc, pm_test, fc_test = self._design_and_simulate()
        self.assertAlmostEqual(pm, pm_test)

    def test_3rd_order_passive_loop_bandwidth(self):
        """The achieved loop bandwidth matches the design target."""
        pm, fc, pm_test, fc_test = self._design_and_simulate()
        self.assertAlmostEqual(fc, fc_test)
############ Helper functions ############333
def get_pm_fc_from_actual_filter_components(d, fstart, fstop, ptsPerDec, kphi, kvco, N, R):
    """ return pm and fc from simulating actual filter components
    Parameters
        d (dict) - returned from a call to calc_components in a pll class
    Returns
        tuple(pm (float), fc (float))
    """
    flt = {key: d[key] for key in ('c1', 'c2', 'c3', 'c4', 'r2', 'r3', 'r4')}
    flt['flt_type'] = "passive"
    simulation = simulatePll(fstart,
                             fstop,
                             ptsPerDec,
                             kphi,
                             kvco,
                             N,
                             R,
                             filt=flt)
    f, g, p, fz, pz, ref_cl, vco_cl = simulation
    # pz is the phase margin at crossover, fz the crossover frequency.
    return pz, fz
| StarcoderdataPython |
3288555 | import pytest
import pdb
# Human-readable identifier shown in reports: test number padded to 10 chars.
test_id = f"{'2.3.1':<10} - Profile Validation"
# Relative weight of this check when scoring a host.
test_weight = 25


def test_validation_against(host):
    # Placeholder: deliberately fails until the real profile-validation
    # check is implemented.
    assert 0 == 1, "TODO - Write Test"
| StarcoderdataPython |
66064 | <reponame>alod83/versatile-data-kit
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import os
import pathlib
from click.testing import CliRunner
from py._path.local import LocalPath
from pytest_httpserver.pytest_plugin import PluginHTTPServer
from vdk.internal import test_utils
from vdk.internal.control.command_groups.job.download_job import download_job
from werkzeug import Response
test_utils.disable_vdk_authentication()
def _read_file(file_path):
with open(file_path, "rb") as job_archive_file:
file_path_binary = job_archive_file.read()
return file_path_binary
def test_download_source(httpserver: PluginHTTPServer, tmpdir: LocalPath):
    # Happy path: the stubbed control service serves a canned job archive
    # and the CLI unpacks it into <path>/<job-name>/ with its config.ini.
    rest_api_url = httpserver.url_for("")
    temp_dir = tmpdir.mkdir("foo")
    test_job_zip = test_utils.find_test_resource("job-zip/test-job.zip")
    data = _read_file(test_job_zip)
    # Stub the sources endpoint to return the zip archive bytes.
    httpserver.expect_request(
        uri="/data-jobs/for-team/test-team/jobs/test-job/sources", method="GET"
    ).respond_with_data(data)
    runner = CliRunner()
    result = runner.invoke(
        download_job,
        ["-n", "test-job", "-t", "test-team", "-u", rest_api_url, "-p", temp_dir],
    )
    assert (
        result.exit_code == 0
    ), f"result exit code is not 0, result output: {result.output}"
    # The archive must be extracted as a directory containing config.ini.
    expected_dir_job = os.path.join(temp_dir, "test-job")
    assert pathlib.Path(expected_dir_job).is_dir()
    assert pathlib.Path(expected_dir_job).joinpath("config.ini").is_file()
def test_download_source_dir_exists(httpserver: PluginHTTPServer, tmpdir: LocalPath):
    """Downloading into a directory that already contains <job-name> fails fast."""
    rest_api_url = httpserver.url_for("")
    temp_dir = tmpdir.mkdir("foo")
    os.mkdir(os.path.join(temp_dir, "test-job"))
    runner = CliRunner()
    result = runner.invoke(
        download_job,
        ["-n", "test-job", "-t", "test-team", "-u", rest_api_url, "-p", temp_dir],
    )
    # BUGFIX: the failure message previously said "not 1" while the
    # assertion checks for exit code 2.
    assert (
        result.exit_code == 2
    ), f"result exit code is not 2, result output: {result.output}"
    # assert it failed for the right reason:
    assert "Directory with name test-job already exists" in result.output
def test_download_job_does_not_exist(httpserver: PluginHTTPServer, tmpdir: LocalPath):
    """A 404 from the sources endpoint surfaces a not-found error and
    does not attempt any archive cleanup."""
    rest_api_url = httpserver.url_for("")
    temp_dir = tmpdir.mkdir("foo")
    # Stub the sources endpoint to report the job as missing.
    httpserver.expect_request(
        uri="/data-jobs/for-team/test-team/jobs/test-job/sources", method="GET"
    ).respond_with_response(Response(status=404))

    result = CliRunner().invoke(
        download_job,
        ["-n", "test-job", "-t", "test-team", "-u", rest_api_url, "-p", temp_dir],
    )

    test_utils.assert_click_status(result, 2)
    # The error must explain that the job was not found for the team ...
    assert "The requested resource cannot be found" in result.output
    # ... and vdk must not try to clean up an archive it never downloaded.
    assert "Cannot cleanup archive" not in result.output
| StarcoderdataPython |
692 | <reponame>dzzhvks94vd2/mikan
class MikanException(Exception):
    """Generic Mikan exception; base class for all errors raised by mikan."""


class ConversionError(MikanException, ValueError):
    """Cannot convert a string.

    Also derives from ValueError so callers may catch it as either type.
    """
| StarcoderdataPython |
class InvalidColor(Exception):
    """Base error for an invalid color specification."""
    pass


class InvalidColorType(InvalidColor):
    """Raised for an invalid color type."""
    pass


class InvalidColorValue(InvalidColor):
    """Raised for an invalid color value."""
    pass


class InvalidOpacity(InvalidColor):
    """Raised for an invalid opacity value."""
    pass
| StarcoderdataPython |
3396895 | <gh_stars>0
from xmlrpc import client as xmlrpclib
import ssl
import csv
from scriptconfig import URL, DB, UID, PSW, WORKERS
# XML-RPC endpoint used to execute model methods on the Odoo server.
socket = xmlrpclib.ServerProxy(URL, context=ssl._create_unverified_context())

input_file = 'files/ivlioh.csv'
input_file = csv.DictReader(open(input_file))

# Map every internal stock location's name to its database id.
all_locations = socket.execute(DB, UID, PSW, 'stock.location', 'search_read', [('usage', '=', 'internal')], ['id', 'name'])
all_locations = {ele['name']: ele['id'] for ele in all_locations}
stock_location = all_locations.get('Stock', '')
if not stock_location:
    # BUGFIX: '\S' was an invalid escape sequence (SyntaxWarning on modern
    # Python, a future SyntaxError); the backslash is now escaped explicitly
    # and the printed text is unchanged.
    print('WH\\Stock location not found')
else:
    for line in input_file:
        try:
            loc = line.get('BIN-CODE', '').strip()
            if loc and loc not in all_locations:
                vals = {
                    'name': loc,
                    'parent_id': stock_location,
                    'usage': 'internal',
                    'active': True,
                }
                # Create the missing bin under WH/Stock and cache its id so
                # duplicate rows in the CSV are skipped.  (renamed from `id`,
                # which shadowed the builtin)
                new_id = socket.execute(DB, UID, PSW, 'stock.location', 'create', vals)
                print(new_id)
                all_locations[loc] = new_id
        except Exception as e:
            # Best effort: report the failing row and keep importing the rest.
            print(e)
| StarcoderdataPython |
194220 | from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.shortcuts import redirect, render, get_object_or_404
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.debug import sensitive_post_parameters
from app.models import SignupInvite, User, Player, PlayerRole, ParticipantRole, Moderator, Spectator
from app.util import most_recent_game, game_required
from .forms import UserSignupForm, UnrestrictedUserSignupForm
def signup(request, signup_invite):
    """Route an invite token either to account creation (new email) or
    straight to game signup (email already has an account)."""
    invite = get_object_or_404(SignupInvite, pk=signup_invite)
    already_registered = User.objects.filter(email=invite.email).exists()
    target = 'token_game_signup' if already_registered else 'user_signup'
    return redirect(target, signup_invite=signup_invite)
class UserSignupView(View):
    """Invite-token account creation: validates the invite, creates the
    user, logs them in, and forwards them to game signup."""

    template_name = "registration/user_signup.html"

    def dispatch(self, request, *args, **kwargs):
        # Resolve the invite up front; an already-used invite short-circuits
        # to the dashboard so tokens are single-use.
        signup_invite = get_object_or_404(SignupInvite, pk=kwargs.get('signup_invite'))
        if signup_invite.used_at:
            messages.info(request, f"You've already created an account using {signup_invite.email}.")
            return redirect('dashboard')
        kwargs.update({'signup_invite': signup_invite})
        return super().dispatch(request, *args, **kwargs)

    def render_user_signup(self, request, signup_invite: SignupInvite, user_signup_form=None):
        # BUGFIX: the default used to be a single UserSignupForm() built at
        # class-definition time and shared across all requests (mutable
        # default argument); build a fresh unbound form per request instead.
        if user_signup_form is None:
            user_signup_form = UserSignupForm()
        return render(request, self.template_name, {
            'signup_invite': signup_invite,
            'user_signup_form': user_signup_form
        })

    def get(self, request, signup_invite: SignupInvite):
        return self.render_user_signup(request, signup_invite)

    @method_decorator(sensitive_post_parameters('password1', 'password2'))
    def post(self, request, signup_invite: SignupInvite):
        user_signup_form = UserSignupForm(request.POST)
        if not user_signup_form.is_valid():
            # Re-render with the bound form so field errors are shown.
            return self.render_user_signup(request, signup_invite, user_signup_form=user_signup_form)
        cleaned_data = user_signup_form.cleaned_data
        first_name, last_name, password = cleaned_data['first_name'], cleaned_data['last_name'], cleaned_data[
            'password1']
        User.objects.create_user(signup_invite.email, password, first_name=first_name, last_name=last_name)
        # Mark the invite consumed so a replayed request hits the used_at
        # guard in dispatch().
        signup_invite.used_at = timezone.now()
        signup_invite.save()
        user = authenticate(username=signup_invite.email, password=password)
        login(request, user)
        return redirect('token_game_signup', signup_invite=signup_invite.id)
class UnrestrictedUserSignupView(View):
    """Open (non-invite) account creation; only reachable when
    TOKEN_RESTRICTED_SIGNUPS is disabled."""

    template_name = "registration/unrestricted_user_signup.html"

    def dispatch(self, request, *args, **kwargs):
        # Hide this endpoint entirely when signups are invite-only.
        if settings.TOKEN_RESTRICTED_SIGNUPS:
            raise Http404()
        return super().dispatch(request, *args, **kwargs)

    def render_user_signup(self, request, user_signup_form=None):
        # BUGFIX: avoid a class-definition-time form instance shared by all
        # requests (mutable default argument); build one per request.
        if user_signup_form is None:
            user_signup_form = UnrestrictedUserSignupForm()
        return render(request, self.template_name, {
            'user_signup_form': user_signup_form
        })

    def get(self, request):
        return self.render_user_signup(request)

    @method_decorator(sensitive_post_parameters('password1', 'password2'))
    def post(self, request):
        user_signup_form = UnrestrictedUserSignupForm(request.POST)
        if not user_signup_form.is_valid():
            return self.render_user_signup(request, user_signup_form=user_signup_form)
        cleaned_data = user_signup_form.cleaned_data
        email, first_name, last_name, password = cleaned_data['email'], cleaned_data['first_name'], cleaned_data[
            'last_name'], cleaned_data['password1']
        if User.objects.filter(email=email).exists():
            messages.error(request, "An user with this email already exists. Please try logging in!")
            return self.render_user_signup(request, user_signup_form=user_signup_form)
        User.objects.create_user(email, password, first_name=first_name, last_name=last_name)
        user = authenticate(username=email, password=password)
        login(request, user)
        return redirect('game_signup')
@method_decorator(login_required, name='dispatch')
@method_decorator(game_required, name='dispatch')
class GameSignupView(View):
    """Signup for the most recent game (regular, non-invite flow)."""

    def get(self, request):
        game = most_recent_game()
        if request.user.participant(game):
            return redirect('dashboard')
        if game.is_running:
            messages.warning(request, "The game has already started please contact a mod.")
            return redirect('dashboard')
        if game.is_finished:
            messages.warning(request, "The game is over.")
            return redirect('dashboard')
        return render(request, "registration/game_signup.html", {'game': game, 'participant_role': None})

    def post(self, request):
        game = most_recent_game()
        in_oz_pool = request.POST.get('is_oz', 'off') == 'on'
        has_signed_waiver = request.POST.get('accept_waiver', 'off') == 'on'
        if not has_signed_waiver:
            messages.warning(request, "Please sign the waiver.")
            return self.get(request)
        if request.user.participant(game):
            messages.warning(request, f"You're already signed up for the {game} game.")
            return redirect('dashboard')
        # BUGFIX: mirror the GET-side game-state guards; previously a
        # crafted POST could join a game that had already started/finished.
        if game.is_running:
            messages.warning(request, "The game has already started please contact a mod.")
            return redirect('dashboard')
        if game.is_finished:
            messages.warning(request, "The game is over.")
            return redirect('dashboard')
        Player.objects.create_player(request.user, game, PlayerRole.HUMAN, in_oz_pool=in_oz_pool)
        return redirect('dashboard')
@method_decorator(login_required, name='dispatch')
@method_decorator(game_required, name='dispatch')
class TokenRequiredGameSignupView(View):
    """Game signup reached through an invite token.

    The invite may carry a forced participant role (moderator / spectator /
    human / zombie); a forced role is allowed to join even after the game
    has started.
    """

    def get(self, request, signup_invite):
        game = most_recent_game()
        invite = SignupInvite.objects.get(pk=signup_invite)
        forced_role = invite.participant_role
        if request.user.participant(game):
            messages.warning(request, "You're already signed up for the game.")
            return redirect('dashboard')
        # A forced role bypasses the "already started" guard on purpose.
        if game.is_running and not forced_role:
            messages.warning(request, "The game has already started please contact a mod.")
            return redirect('dashboard')
        if game.is_finished:
            messages.warning(request, "The game is over.")
            return redirect('dashboard')
        return render(request, "registration/game_signup.html", {'game': game, 'participant_role': forced_role})

    def post(self, request, signup_invite):
        game = most_recent_game()
        invite = SignupInvite.objects.get(pk=signup_invite)
        forced_role = invite.participant_role
        in_oz_pool = request.POST.get('is_oz', 'off') == 'on'
        has_signed_waiver = request.POST.get('accept_waiver', 'off') == 'on'
        if not has_signed_waiver:
            messages.warning(request, "Please sign the waiver.")
            # BUGFIX: self.get() was called without signup_invite, which
            # raised TypeError on every missing-waiver submission.
            return self.get(request, signup_invite)
        if request.user.participant(game):
            messages.warning(request, f"You're already signed up for the {game} game.")
            return redirect('dashboard')
        if forced_role:
            # Honor the role baked into the invite token.
            if forced_role == ParticipantRole.MODERATOR:
                Moderator.objects.create_moderator(request.user, game)
            elif forced_role == ParticipantRole.SPECTATOR:
                Spectator.objects.create_spectator(request.user, game)
            else:
                equivalent_role = PlayerRole.HUMAN if forced_role == ParticipantRole.HUMAN else PlayerRole.ZOMBIE
                Player.objects.create_player(request.user, game, equivalent_role)
        else:
            Player.objects.create_player(request.user, game, PlayerRole.HUMAN, in_oz_pool=in_oz_pool)
        # NOTE(review): the invite is never marked as used here, unlike the
        # account-creation flow — confirm whether tokens should be one-shot.
        messages.success(request, f"You've successfully signed up for the {game} game.")
        return redirect('dashboard')
| StarcoderdataPython |
3255495 | <filename>hoodie/templatetags/util.py<gh_stars>1-10
import re
from django import template
from django.urls import reverse, NoReverseMatch
register = template.Library()
@register.simple_tag(takes_context=True)
def active(context, pattern_or_urlname):
    """Return "active" when the current request path matches the given URL
    name (exactly) or regex pattern, else the empty string."""
    try:
        # A resolvable URL name is matched exactly; otherwise the argument
        # is treated as a raw regex.
        pattern = "^" + reverse(pattern_or_urlname) + "$"
    except NoReverseMatch:
        pattern = pattern_or_urlname
    current_path = context["request"].path
    return "active" if re.search(pattern, current_path) else ""
@register.filter
def get_type(value):
    """Template filter exposing the concrete type name of *value*."""
    return value.__class__.__name__
@register.filter
def prettify(value, sep="_"):
    """Template filter: replace *sep* with spaces and capitalize the result."""
    spaced = value.replace(sep, " ")
    return spaced.capitalize()
| StarcoderdataPython |
1680022 | <reponame>jbeilstenedmands/cctbx_project<gh_stars>0
from __future__ import absolute_import, division, print_function
from builtins import range
from libtbx import utils
from libtbx.test_utils import Exception_expected, approx_equal, show_diff
from six.moves import cStringIO as StringIO
import warnings
import random
import time
import os
import stat
import tempfile
def exercise_misc():
  """Grab-bag smoke tests for assorted helpers in libtbx.utils."""
  utils.host_and_user().show(prefix="### ")
  # human_readable_time / human_readable_time_as_seconds must round-trip
  time_in_seconds = 1.1
  for i_trial in range(55):
    time_in_seconds = time_in_seconds**1.1
    time_units, time_unit = utils.human_readable_time(
      time_in_seconds=time_in_seconds)
    assert approx_equal(
      utils.human_readable_time_as_seconds(time_units, time_unit),
      time_in_seconds)
  #
  # timestamp formatting, 12- and 24-hour, long and short forms
  fts = utils.format_timestamp
  f12 = utils.format_timestamp_12_hour
  f24 = utils.format_timestamp_24_hour
  def check(string, expected):
    # only lengths are compared: the rendered time depends on the local
    # timezone of the machine running the test
    assert len(string) == len(expected)
  check(f12(1280007000), 'Jul 24 2010 02:30 PM')
  check(f24(1280007000), 'Jul 24 2010 14:30')
  check(f12(1280007000, True), '24-07-10 02:30 PM')
  check(f24(1280007000, True), '24-07-10 14:30')
  check(fts(1280007000), 'Jul 24 2010 02:30 PM')
  #
  # number_from_string: rejects booleans, accepts numerics and expressions
  nfs = utils.number_from_string
  for string in ["True", "False"]:
    try: nfs(string=string)
    except ValueError as e:
      assert str(e) == 'Error interpreting "%s" as a numeric expression.' % (
        string)
    else: raise Exception_expected
  assert nfs(string="-42") == -42
  assert approx_equal(nfs(string="3.14"), 3.14)
  assert approx_equal(nfs(string="cos(0)"), 1)
  try: nfs(string="xxx(0)")
  except ValueError as e:
    assert str(e).startswith(
      'Error interpreting "xxx(0)" as a numeric expression: ')
  else: raise Exception_expected
  #
  s = "[0.143139, -0.125121, None, -0.308607]"
  # NOTE(review): numstr is not brought in by this module's import block --
  # confirm where it is expected to come from (as written it would raise
  # NameError here).
  assert numstr(values=eval(s)) == s
  #
  # get_memory_from_string: suffixed sizes and numeric pass-through
  for s,i in {"2000000" : 2000000,
              "2k" : 2048,
              "2Kb" : 2048,
              "2 Kb" : 2048,
              "5Mb" : 5*1024*1024,
              "2.5Gb" : 2.5*1024*1024*1024,
              "1T": 1024*1024*1024*1024,
              10000 : 10000,
              5.5 : 5.5,
              }.items():
    assert utils.get_memory_from_string(s) == i
  #
  assert utils.tupleize(1) == (1,)
  assert utils.tupleize("abcde") == ('a', 'b', 'c', 'd', 'e')
  assert utils.tupleize([1,2,3]) == (1,2,3)
  #
  # search_for: each supported matching mode
  sf = utils.search_for
  assert sf(pattern="fox", mode="==", lines=["fox", "foxes"]) \
      == ["fox"]
  assert sf(pattern="o", mode="find", lines=["fox", "bird", "mouse"]) \
      == ["fox", "mouse"]
  assert sf(pattern="fox", mode="startswith", lines=["fox", "foxes"]) \
      == ["fox", "foxes"]
  assert sf(pattern="xes", mode="endswith", lines=["fox", "foxes"]) \
      == ["foxes"]
  assert sf(pattern="es$", mode="re.search", lines=["geese", "foxes"]) \
      == ["foxes"]
  assert sf(pattern="ge", mode="re.match", lines=["geese", "angel"]) \
      == ["geese"]
  #
  # n_dim_index_from_one_dim: exhaustive checks for 1-, 2- and 3-d shapes
  nd1d = utils.n_dim_index_from_one_dim
  for size in range(1,5):
    for i1d in range(size):
      assert nd1d(i1d=i1d, sizes=(size,)) == [i1d]
  for sizes in [(1,1), (1,3), (3,1), (2,3)]:
    ni, nj = sizes
    for i in range(ni):
      for j in range(nj):
        i1d = i*nj+j
        assert nd1d(i1d=i1d, sizes=sizes) == [i,j]
  for sizes in [(1,1,1), (1,3,1), (3,2,1), (4,3,2)]:
    ni, nj, nk = sizes
    for i in range(ni):
      for j in range(nj):
        for k in range(nk):
          i1d = (i*nj+j)*nk+k
          assert nd1d(i1d=i1d, sizes=sizes) == [i,j,k]
  #
  # exception reporting of a subprocess run through easy_run
  from libtbx import easy_run
  b = easy_run.fully_buffered(
    command="libtbx.raise_exception_for_testing")
  for lines in [b.stdout_lines, b.stderr_lines]:
    assert lines[0].startswith("EXCEPTION_INFO: show_stack(0): ")
    assert lines[-1] == "EXCEPTION_INFO: RuntimeError: Just for testing."
  b = easy_run.fully_buffered(
    command="libtbx.raise_exception_for_testing silent")
  b.raise_if_errors_or_output()
  #
  # frange excludes the end point, samples includes it
  frange = utils.frange
  samples = utils.samples
  assert approx_equal([i/10. for i in range(-2,2)], frange(-0.2,0.2,0.1))
  assert approx_equal([i/10. for i in range(-2,2+1)], samples(-0.2,0.2,0.1))
  assert approx_equal([i/10. for i in range(2,-2,-1)], frange(0.2,-0.2,-0.1))
  assert approx_equal([i/10. for i in range(2,-2-1,-1)], samples(0.2,-0.2,-0.1))
  assert approx_equal([i/4. for i in range(4,8)], frange(1, 2, 0.25))
  assert approx_equal([i/4. for i in range(4,8+1)], samples(1, 2, 0.25))
  assert approx_equal([0.2+i/3. for i in range(4)], frange(0.2, 1.3, 1./3))
  assert approx_equal([0.2+i/3. for i in range(4)], samples(0.2, 1.3, 1./3))
  assert approx_equal(list(range(5)) , frange(5))
  assert approx_equal(list(range(5+1)) , samples(5))
  assert approx_equal(list(range(-5)), frange(-5))
  assert approx_equal(list(range(-5-1)), samples(-5))
  assert approx_equal(list(range(1,3)), frange(1, 3))
  assert approx_equal(list(range(1,3+1)), samples(1, 3))
  assert approx_equal([i/10. for i in range(20,9,-2)], frange(2.0,0.9,-0.2))
  assert approx_equal([i/10. for i in range(20,9,-2)], samples(2.0,0.9,-0.2))
  #
  # crystallographic-style value(su) formatting
  ff = utils.format_float_with_standard_uncertainty
  assert ff(21.234567, 0.0013) == "21.2346(13)"
  assert ff(21.234567, 0.0023) == "21.235(2)"
  assert ff(12345, 45) == "12350(50)"
  assert ff(12.3,1.2) == "12.3(12)"
  assert ff(-0.2451, 0.8135) == "-0.2(8)"
  assert ff(1.234, 0.196) == "1.2(2)"
  assert ff(1.234, 0.193) == "1.23(19)"
  #
  for n in range(4):
    assert len(utils.random_hex_code(number_of_digits=n)) == n
  #
  # the remaining helpers are environment-dependent: only print their output
  print("multiprocessing problem:", utils.detect_multiprocessing_problem())
  #
  print("base36_timestamp():", utils.base36_timestamp(), "now")
  print("base36_timestamp():", utils.base36_timestamp(
    seconds_since_epoch=115855*365.2425*24*60*60), "year 115855 CE")
  #
  print("get_svn_revision():", utils.get_svn_revision())
  print("get_build_tag():", utils.get_build_tag())
  # concatenate_python_script
  # XXX the string concatenation here is required to trick libtbx.find_clutter,
  # which will warn about repetition of the future division import.
  script = """
from __future__ """ + """import division
import os.path
def foo () :
  print "bar"
"""
  d = tempfile.mkdtemp()
  name = os.path.join(d, "tst_libtbx_utils_python_script.py")
  name2 = os.path.join(d, "tst_libtbx_utils_python_script2.py")
  open(name, "w").write(script)
  f = open(name2, "w")
  utils.concatenate_python_script(out=f, file_name=name)
  f.close()
  # concatenation must strip __future__ imports but keep the definitions
  lines = open(name2).readlines()
  have_def = False
  for line in lines :
    assert (not "__future__" in line)
    if line.startswith("def foo") :
      have_def = True
  assert have_def
def exercise_user_plus_sys_time():
  """Check the prefix/suffix of show_elapsed and show_delta output.

  NOTE(review): this function is not invoked by run() below -- most likely
  because of the bug fixed here.
  """
  s = StringIO()
  utils.user_plus_sys_time().show_elapsed(out=s, prefix="e: ")
  text = s.getvalue()
  assert text.startswith("e: ")
  assert text.endswith(" s")
  # BUG FIX: the original rebound `s` to the extracted string and then
  # passed that string as the output stream of show_delta (and called
  # .getvalue() on it), which can never work.  Use a fresh buffer.
  s = StringIO()
  utils.user_plus_sys_time().show_delta(out=s, prefix="d: ")
  text = s.getvalue()
  assert text.startswith("d: ")
  assert text.endswith(" s")
def exercise_indented_display():
  """Exercise buffered_indentor: output is held in a buffer and only
  reaches the underlying stream on write_buffer(), indented one space per
  shift_right() level."""
  out = StringIO()
  level0 = utils.buffered_indentor(file_object=out)
  print("level0", file=level0)
  level0.flush()
  level1 = level0.shift_right()
  print("level1", file=level1)
  level1.flush()
  # nothing is emitted until the buffer is explicitly written
  assert out.getvalue() == ""
  level1.write_buffer()
  assert not show_diff(out.getvalue(), """\
level0
 level1
""")
  # a partial line (no newline yet) is emitted as-is
  print("abc", end='', file=level1)
  level1.write_buffer()
  assert not show_diff(out.getvalue(), """\
level0
 level1
 abc""")
  print(file=level1)
  level1.write_buffer()
  assert not show_diff(out.getvalue(), """\
level0
 level1
 abc
""")
  print("def", end='', file=level1)
  level1.write_buffer()
  assert not show_diff(out.getvalue(), """\
level0
 level1
 abc
 def""")
  # an empty write followed by more text continues the same line
  level1.write("")
  print("hij", file=level1)
  level1.write_buffer()
  assert not show_diff(out.getvalue(), """\
level0
 level1
 abc
 def hij
""")
def exercise_approx_equal():
  """Spot-check approx_equal on floats, complex values and mixed lists."""
  cases = [
    (1., 1. + 1e-11, None),          # default tolerance
    (1+1j, 0.997+1.004j, 1e-2),      # complex vs complex
    (1, 0.997+0.004j, 1e-2),         # real vs complex
    (1+0.003j, 0.997, 1e-2),         # complex vs real
    ([2.5, 3.4+5.8j, 7.89], [2.4+0.1j, 3.5+5.9j, 7.90], 0.2),
  ]
  for first, second, eps in cases:
    if eps is None:
      assert approx_equal(first, second)
    else:
      assert approx_equal(first, second, eps=eps)
def exercise_file_utils () :
  """Exercise find_files and sort_files_by_mtime.

  Creation order of the files is significant: sort_files_by_mtime must
  return the .txt files in the order they were written, while the .log
  file must be excluded by the pattern filter.
  """
  dir_name = tempfile.mkdtemp()
  if (not os.path.exists(dir_name)) :
    os.mkdir(dir_name)
  sorted_files = []
  for prefix in ["XYZ", "abc", "qwerty", "123"] :
    file_name = os.path.join(dir_name, "%s.txt" % prefix)
    open(file_name, "w").write(prefix)
    sorted_files.append(file_name)
    time.sleep(1) # XXX the mtime resolution is in seconds :(
  # a non-matching file that find_files must not pick up
  f = open(os.path.join(dir_name, "hkl.log"), "w")
  f.write("hkl")
  f.close()
  file_names = utils.find_files(dir_name, pattern=".txt$")
  sorted_files_2 = utils.sort_files_by_mtime(file_names)
  assert (sorted_files_2 == sorted_files), '''
Files not in correct order:
%s
%s
''' % (sorted_files_2, sorted_files)
def exercise_dir_utils () :
  """Exercise create_run_directory, check_if_output_directory_exists and
  allow_delete_directory.

  Creates and removes directories in the current working directory, so the
  cwd must be writable.
  """
  # create_run_directory appends the next free counter (or a forced one)
  dirs = ["tst_utils_1", "tst_utils_2", "tst_utils_45"]
  for dir_name in dirs :
    if (os.path.isdir(dir_name)) : os.rmdir(dir_name)
  dir_name = utils.create_run_directory("tst_utils")
  assert (os.path.basename(dir_name) == "tst_utils_1")
  dir_name = utils.create_run_directory("tst_utils")
  assert (os.path.basename(dir_name) == "tst_utils_2")
  dir_name = utils.create_run_directory("tst_utils", 45)
  assert (os.path.basename(dir_name) == "tst_utils_45")
  for dir_name in dirs :
    os.rmdir(dir_name)
  # a path that almost certainly does not exist must raise Sorry
  file_name = "/cctbx/%s/%s/XXXX.pdb" % (random.random(), random.random())
  try :
    utils.check_if_output_directory_exists(file_name)
  except utils.Sorry :
    pass
  else :
    raise Exception_expected
  dir_name = os.getcwd()
  utils.check_if_output_directory_exists(dir_name=dir_name)
  # writing into a Dropbox folder must emit exactly one warning
  dir_created = False
  if (not os.path.exists("Dropbox")) :
    os.mkdir("Dropbox")
    dir_created = True
  dir_name = os.path.join(os.getcwd(), "Dropbox")
  with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")
    utils.check_if_output_directory_exists(dir_name=dir_name)
    assert len(w) == 1
    assert "Dropbox directory" in str(w[-1].message)
  if (dir_created) :
    os.rmdir("Dropbox")
  # deletion is vetoed for the home directory and well-known subfolders
  host_info = utils.host_and_user()
  assert not utils.allow_delete_directory(host_info.homedir)
  target_dir = os.path.join(host_info.homedir, "Downloads")
  assert not utils.allow_delete_directory(target_dir)
  target_dir = os.path.join(host_info.homedir, "data", "lysozyme")
  assert utils.allow_delete_directory(target_dir)
def exercise_retrieve_unless_exists():
  """Exercise retrieve_unless_exists through a file:// URL.

  First retrieval must report "Downloaded"; a second retrieval of the same
  (digest-verified) file must report "Cached".
  """
  # BUG FIX: urllib.pathname2url only exists on Python 2; this module
  # already depends on six, so go through six.moves for 2/3 portability.
  from six.moves.urllib.request import pathname2url
  filehandle, filename = tempfile.mkstemp(prefix='kings_of_france')
  # we will need to pass filename to functions which will open it
  # on Windows this causes a permission exception
  os.close(filehandle)
  with open(filename, 'w') as f:
    f.write(
      'Henri IV, Louis XIII, Louis XIV, Louis XV, Louis XVI, Louis XVIII')
  # digests.txt sits next to the source file and maps basename -> md5
  digestname = os.path.join(os.path.dirname(f.name), 'digests.txt')
  with open(digestname, 'w') as f:
    f.writelines([
      ('%s %s\n') %
      (os.path.basename(filename), utils.md5_hexdigest(filename)),
      'something_else yyyyyyy',
    ])
  os.chmod(digestname,
           os.stat(digestname).st_mode | stat.S_IWGRP | stat.S_IWOTH)
  d = tempfile.mkdtemp()
  targetname = os.path.join(d, 'target')
  try: os.remove(targetname)
  except Exception: pass
  url = 'file:' + pathname2url(filename)
  assert (utils.retrieve_unless_exists(url=url, filename=targetname) ==
          "Downloaded")
  with open(filename) as source, open(targetname) as target:
    assert source.read() == target.read()
  assert (utils.retrieve_unless_exists(url=url, filename=targetname) ==
          "Cached")
  with open(filename) as source, open(targetname) as target:
    assert source.read() == target.read()
def exercise_str_unicode():
  """Round-trip a non-ASCII character between utf-8 str and unicode forms.

  NOTE(review): this function is not invoked by run() below; the test
  values are Python-2 oriented ('\\xc3\\x85' is a 2-character text string
  on Python 3) -- confirm intent before wiring it into run().
  """
  # BUG FIX: to_unicode and to_str were referenced as bare names without
  # ever being imported, so calling this function raised NameError.
  from libtbx.utils import to_unicode, to_str
  s = '\xc3\x85'
  u = u'\xc5'
  assert(to_unicode(s) == u)
  assert(to_str(u) == s)
def exercise_group_args():
  """Exercise attribute access, printing and merge() of libtbx.group_args."""
  from libtbx import group_args
  out = StringIO()
  first = group_args(
    a=1,
    b=2,
    c=3)
  assert (first.a, first.b, first.c) == (1, 2, 3)
  second = group_args(
    d = 'd',
    e = 'e')
  assert (second.d, second.e) == ('d', 'e')
  # printing renders one "name : value" line per attribute
  print(first, file=out)
  assert not show_diff(out.getvalue(), """group_args
  a : 1
  b : 2
  c : 3\n""")
  # merge() pulls the other object's attributes in; the source is unchanged
  first.merge(second)
  assert (first.a, first.b, first.c) == (1, 2, 3)
  assert (first.d, first.e) == ('d', 'e')
  assert (second.d, second.e) == ('d', 'e')
  # merging overwrites attributes that already exist
  third = group_args(
    a = 11,
    b = 12)
  first.merge(third)
  assert (first.a, first.b, first.c) == (11, 12, 3)
  assert (third.a, third.b) == (11, 12)
def exercise_round2():
  """round2 rounds exact halves away from zero (not banker's rounding)."""
  int_cases = ((1.5, 2), (2.5, 3), (-1.5, -2), (-2.5, -3))
  for value, expected in int_cases:
    assert int(utils.round2(value, 0)) == expected
  frac_cases = ((0.15, 0.2), (0.25, 0.3), (-0.15, -0.2), (-0.25, -0.3))
  for value, expected in frac_cases:
    assert approx_equal(expected, utils.round2(value, 1))
def run(args):
  """Run the test suite.

  The only recognized flag is --exercise-retrieve-unless-exists, which
  enables the file-URL retrieval test (skipped by default).

  NOTE(review): exercise_user_plus_sys_time and exercise_str_unicode are
  defined above but never invoked here -- confirm whether that is
  intentional.
  """
  # BUG FIX: the original asserted len(args) == 0, which made the
  # --exercise-retrieve-unless-exists branch below unreachable dead code.
  assert len(args) == 0 or args == ['--exercise-retrieve-unless-exists']
  if '--exercise-retrieve-unless-exists' in args:
    exercise_retrieve_unless_exists()
  else:
    print('Skipping exercise_retrieve_unless_exists')
  exercise_misc()
  assert utils.sequence_index_dict(["a", "b"]) == {"a": 0, "b": 1}
  # flat_list flattens arbitrarily nested lists (scalars become 1-lists)
  assert utils.flat_list(0) == [0]
  assert utils.flat_list([1,2,3]) == [1,2,3]
  assert utils.flat_list([1,[2,3,4],3]) == [1,2,3,4,3]
  assert utils.flat_list([1,[2,3,4],[[3,4],[5,6]]]) == [1,2,3,4,3,4,5,6]
  # format_exception renders the active exception as "Type: message"
  try:
    raise RuntimeError("Trial")
  except KeyboardInterrupt: raise
  except Exception:
    assert utils.format_exception() == "RuntimeError: Trial"
  else: raise Exception_expected
  # ... and includes file/line information for assertion failures
  try:
    assert 1 == 2
  except KeyboardInterrupt: raise
  except Exception:
    s = utils.format_exception()
    assert s.startswith("AssertionError: ")
    assert s.find("tst_utils.py line ") >= 0
  else: raise Exception_expected
  exercise_indented_display()
  exercise_approx_equal()
  exercise_file_utils()
  exercise_dir_utils()
  exercise_group_args()
  exercise_round2()
  print(utils.format_cpu_times())
if __name__ == "__main__":
  # forward the command line (minus the program name) to the test runner
  import sys
  run(args=sys.argv[1:])
| StarcoderdataPython |
3354858 | <filename>九、奇淫巧计类/6.红黑树节点操作.py<gh_stars>1-10
class Node:
    """A red-black tree node.

    Attributes:
        key: the payload/search key (None for a sentinel node).
        color: the node color, None until assigned.
        size: number of nodes in the subtree rooted here (0 for sentinel).
        left, right, p: child and parent links, None until linked.
    """
    def __init__(self, key=None, color=None, size=0):
        self.key, self.color, self.size = key, color, size
        self.left = self.right = self.p = None
class Tree:
    """Red-black tree container: the root plus the shared nil sentinel."""
    def __init__(self):
        self.nil = Node()
        self.root = None
class Solution:
def leftRotate(self, T: Tree, x: Node):
y = x.right
x.right = y.left
if y.left != T.nil:
y.left.p = x
y.p = x.p
if x.p == T.nil:
T.root = y
elif x.p.left == x:
x.p.left = y
else:
x.p.right = y
y.left = x
x.p = y
y.size = x.size
x.size = x.left.size + x.right.size + 1
def rightRotate(self, T: Tree, x: Node):
y = x.left
x.left = y.right
if y.right != T.nil:
y.right.p = x
y.p = x.p
if x.p == T.nil:
T.root = y
elif x.p.left == x:
x.p.left = y
else:
x.p.right = y
y.right = x
x.p = y
y.size = x.size
x.size = x.left.size + x.right.size + 1
def | StarcoderdataPython |
4820081 | <gh_stars>1-10
# coding=utf-8
"""
Copyright 2013 Load Impact
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# BUG FIX: CoercionError (defined below) was missing from __all__, so
# 'from ... import *' silently failed to export it.
__all__ = ['ApiError', 'MissingApiTokenError', 'CoercionError',
           'ResponseParseError', 'ConnectionError', 'TimeoutError',
           'HTTPError', 'ClientError', 'BadRequestError', 'UnauthorizedError',
           'ForbiddenError', 'NotFoundError', 'MethodNotAllowedError',
           'ConflictError', 'GoneError', 'RateLimitError', 'ServerError']
class ApiError(Exception):
    """All Load Impact API exceptions derive from this class."""
class MissingApiTokenError(ApiError):
    """Raised when API client is created and an API token can't be found."""
class CoercionError(ApiError):
    """Raised when a resource field value coercion fails."""
class ResponseParseError(ApiError):
    """Raised when parsing of API response fails."""
class ConnectionError(ApiError):
    """Raised when a TCP connection error is encountered."""
    # NOTE(review): name shadows the Python 3 builtin ConnectionError when
    # star-imported -- confirm this is acceptable for the public API.
class TimeoutError(ApiError):
    """Raised when a TCP connection timeout is encountered."""
    # NOTE(review): likewise shadows the Python 3 builtin TimeoutError.
class HTTPError(ApiError):
    """All HTTP exception classes derive from this base class."""
    def __init__(self, msg=None, response=None):
        # msg: human-readable error message passed up to Exception.
        # response: the failed HTTP response object (if any), kept so
        # handlers can inspect status code / headers / body.
        super(HTTPError, self).__init__(msg)
        self.response = response
class ClientError(HTTPError):
    """Raised when 4xx HTTP response code is encountered with no specialized
    exception class.
    """
class BadRequestError(ClientError):
    """Raised when 400 HTTP status code is encountered."""
class UnauthorizedError(ClientError):
    """Raised when 401 HTTP status code is encountered."""
class ForbiddenError(ClientError):
    """Raised when 403 HTTP status code is encountered."""
class NotFoundError(ClientError):
    """Raised when 404 HTTP status code is encountered."""
class MethodNotAllowedError(ClientError):
    """Raised when 405 HTTP status code is encountered."""
class ConflictError(ClientError):
    """Raised when 409 HTTP status code is encountered."""
class GoneError(ClientError):
    """Raised when 410 HTTP status code is encountered."""
class RateLimitError(ClientError):
    """Raised when 427 HTTP status code is encountered."""
    # NOTE(review): HTTP rate limiting is normally signalled with 429
    # "Too Many Requests" (RFC 6585); confirm whether the Load Impact API
    # really returns 427 here or whether 429 was intended.
class ServerError(HTTPError):
    """Raised when 5xx HTTP response code is encountered with no specialized
    exception class.
    """
| StarcoderdataPython |
1631709 | # (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
smicli commands based on python click for executing sweeps of selected
targets to find WBEM servers.
"""
from __future__ import print_function, absolute_import
from collections import defaultdict
import click
import six
from mysql.connector import Error as MySQLError
from smipyping import UsersTable, CompaniesTable, datetime_display_str
from .smicli import cli, CMD_OPTS_TXT
from ._click_common import validate_prompt, print_table, pick_from_list, \
pick_multiple_from_list, test_db_updates_allowed
from ._common_options import add_options, no_verify_option
from ._cmd_companies import get_companyid
def select_companyid(context, companyid):
    """
    Resolve a company id, prompting interactively when the user passed "?"
    and validating it otherwise.  Returns None unchanged when no companyid
    was supplied.  Used by the list and activate subcommands to limit
    their selection lists.
    """
    # BUG FIX: the original built the CompaniesTable (a database access)
    # before checking for None and then discarded it; return early instead.
    if companyid is None:
        return None
    companies_tbl = CompaniesTable.factory(context.db_info, context.db_type,
                                           context.verbose)
    companyid = get_companyid(context, companies_tbl, companyid)
    return companyid
def build_userid_display(userid, user_item):
    """Format one user record as a fixed-width selection-list line.

    Shows id, company, first/last name, email and active state so the
    operator can identify the user to pick.
    """
    fields = (userid,
              user_item['CompanyName'],
              user_item['FirstName'],
              user_item['Lastname'],
              user_item['Email'],
              user_item['Active'])
    return u' id=%-3s %-20s %-16s %-16s %s, %s' % fields
def pick_multiple_user_ids(context, users_tbl, active=None, companyid=None):
    """
    Interactively select one or more userids from a list shown on the
    console.

    Parameters:
      context: click context carrying the user data information.
      users_tbl: users table (mapping userid -> user record).
      active: if None show all users, otherwise only users whose active
        state matches this value.
      companyid: optional company filter; may be "?" for interactive pick.

    Returns:
      List of selected userids, or None if the user aborted (ctrl-C).
    """
    if active is None:
        # list() so the result is indexable below even if keys() is a view
        userids = list(users_tbl.keys())
    else:
        userids = users_tbl.get_active_userids(active)
    if companyid:
        companyid = select_companyid(context, companyid)
        userids = [userid for userid in userids
                   if users_tbl[userid]['CompanyID'] == companyid]
    display_txt = [build_userid_display(userid, users_tbl[userid])
                   for userid in userids]
    # BUG FIX: the original left `indexes` unbound when the pick raised
    # ValueError, producing a NameError on the `is None` test below.
    indexes = None
    try:
        indexes = pick_multiple_from_list(context, display_txt,
                                          "Pick UserIDs:")
    except ValueError:
        pass
    if indexes is None:
        click.echo('Abort command')
        return None
    return [userids[index] for index in indexes]
def get_multiple_user_ids(context, userids, users_tbl, options=None,
                          allow_none=False, active=None, companyid=None):
    """
    Resolve a list of userids, either directly or interactively.

    If userids is the single-element list ["?"] (or options['interactive']
    is set) an interactive pick list is presented; otherwise each supplied
    id is converted to int and validated against users_tbl.

    Parameters:
      context: current click context.
      userids (list of string/int, or None): candidate user ids, or ["?"].
      users_tbl: users table (mapping userid -> user record).
      options: click options dict; a truthy 'interactive' entry forces the
        pick list.
      allow_none (bool): if True, None input is returned unchanged.
      active (bool or None): filter forwarded to the interactive pick list.
      companyid: company filter forwarded to the interactive pick list.

    Returns:
      List of validated integer userids (possibly empty), or None if an
      interactive selection was aborted.

    Raises:
      click.ClickException: if an id is malformed or not in the table.
    """
    # NOTE(review): this parses as '(allow_none and userids is None) or
    # userids == []', so an empty list is returned unchanged even when
    # allow_none is False -- confirm that is the intended behavior.
    if allow_none and userids is None or userids == []:
        return userids
    context.spinner.stop()
    int_user_ids = []
    # TODO: Future remove this option
    if options and 'interactive' in options and options['interactive']:
        context.spinner.stop()
        int_user_ids = pick_multiple_user_ids(context, users_tbl)
    elif isinstance(userids, (list, tuple)):
        if len(userids) == 1 and userids[0] == "?":
            context.spinner.stop()
            int_user_ids = pick_multiple_user_ids(context, users_tbl, active,
                                                  companyid)
        else:
            # direct mode: coerce each entry to int and verify membership
            for userid in userids:
                if isinstance(userid, six.integer_types):
                    pass  # already an int; membership check below
                elif isinstance(userid, six.string_types):
                    try:
                        userid = int(userid)
                    except ValueError:
                        raise click.ClickException('UserID must be integer. '
                                                   '"%s" cannot be mapped to '
                                                   'integer' % userid)
                else:
                    raise click.ClickException('List of User Ids invalid')
                try:
                    # index purely to confirm the id exists in the table
                    users_tbl[userid]
                except KeyError as ke:
                    raise click.ClickException('User ID %s not in database. '
                                               'Exception: %s: %s' %
                                               (userid,
                                                ke.__class__.__name__, ke))
                int_user_ids.append(userid)
            context.spinner.start()
            return int_user_ids
    if int_user_ids == []:
        click.echo("No users selected.")
    context.spinner.start()
    return int_user_ids
def pick_userid(context, users_tbl):
    """
    Interactively select a single userid from a list shown on the console.

    Returns:
      The selected userid, or None if the user aborted (ctrl-C).
    """
    # list() so the keys are indexable below even if keys() returns a view
    users_keys = list(users_tbl.keys())
    display_options = [build_userid_display(key, users_tbl[key])
                       for key in users_keys]
    # BUG FIX: the original left `index` unbound when pick_from_list raised
    # ValueError, producing a NameError on the `is None` test below.
    index = None
    try:
        index = pick_from_list(context, display_options, "Pick UserID:")
    except ValueError:
        pass
    if index is None:
        click.echo('Abort command')
        return None
    return users_keys[index]
def get_userid(context, users_tbl, userid, options=None):
    """
    Resolve and validate a single userid.

    `userid` may be an integer, a numeric string, or "?" to pick
    interactively from a console list.  A truthy options['interactive']
    entry forces the pick list regardless of `userid`.  The resolved id is
    always validated against users_tbl.

    Returns:
      The integer userid of a valid Users table entry, or None if an
      interactive selection was aborted.

    Raises:
      click.ClickException: if the userid is malformed or not in the table.
    """
    context.spinner.stop()
    if options and 'interactive' in options and options['interactive']:
        userid = pick_userid(context, users_tbl)
    elif isinstance(userid, six.integer_types):
        # BUG FIX: the original returned users_tbl[userid] (the user
        # record) here, while the string branch -- and the docstring --
        # return the integer id.  Index only to validate membership.
        try:
            users_tbl[userid]  # pylint: disable=pointless-statement
            context.spinner.start()
            return userid
        except KeyError as ke:
            raise click.ClickException("UserID %s not valid: exception %s" %
                                       (userid, ke))
    elif isinstance(userid, six.string_types):
        if userid == "?":
            userid = pick_userid(context, users_tbl)
        else:
            try:
                userid = int(userid)
            except ValueError:
                raise click.ClickException('UserID must be integer or "?" '
                                           'not %s' % userid)
            try:
                # test if userid in table
                users_tbl[userid]  # pylint: disable=pointless-statement
                context.spinner.start()
                return userid
            except KeyError as ke:
                raise click.ClickException("UserID %s not found in Users "
                                           "table: exception %s" %
                                           (userid, ke))
    else:
        raise click.ClickException('UserID %s. Requires UserID, ?' % userid)
    if userid is None:
        click.echo("Operation aborted by user.")
    context.spinner.start()
    return userid
# Top-level 'users' command group; all user-table subcommands hang off it.
@cli.group('users', options_metavar=CMD_OPTS_TXT)
def users_group():
    """
    Command group to handle users table.
    Includes subcommands to list entries in the database users table
    and to create, modify, delete specific entries.
    """
    pass
@users_group.command('add', options_metavar=CMD_OPTS_TXT)
@click.option('-f', '--firstname', type=str,
              required=True,
              help='User first name.')
@click.option('-l', '--lastname', type=str,
              default=None,
              required=True,
              help='User last name')
@click.option('-e', '--email', type=str,
              required=True,
              help='User email address.')
@click.option('-c', '--companyID', type=str,
              default=None,
              required=True,
              help='CompanyID for the company attached to this user. Enter ? '
              'to use selection list to get company id')
@click.option('--inactive', default=False, is_flag=True,
              help='Set the active/inactive state in the database for this '
              'user. An inactive user is ignored. Default is active')
@click.option('--disable', default=False, is_flag=True,
              help='Disable notifications in the database for this '
              'user. Default is enabled')
@add_options(no_verify_option)
@click.pass_obj
def users_add(context, **options): # pylint: disable=redefined-builtin
    """
    Add a new user in the user table.
    Creates a new user with the defined parameters for the company defined
    by the required parameter companyID.
    Verification that the operation is correct is requested before the change
    is executed unless the `--no-verify' parameter is set.
    Examples:
        smicli users add -f John -l Malia -e <EMAIL> -c ?
    Defines a new user with name and email defined after using select list
    to get companyID of the user. A prompt for verification is presented
    before the database is changed.
    """
    # Defer the actual database work to the cmd_users_add action function
    # through the context dispatcher.
    context.execute_cmd(lambda: cmd_users_add(context, options))
@users_group.command('list', options_metavar=CMD_OPTS_TXT)
@click.option('-f', '--fields', multiple=True, type=str, default=None,
              metavar='FIELDNAME',
              help='Define specific fields for output. UserID always '
              'included. Multiple fields can be specified by repeating '
              'the option. (Default: predefined list of fields).'
              '\nEnter: "-f ?" to interactively select fields for display.'
              '\nEx. "-f UserID -f CompanyName"')
@click.option('-d', '--disabled', default=False, is_flag=True, required=False,
              help='Include disabled users. Otherwise only users that are '
              'set enabled in the database are shown.'
              '(Default:Do not show disabled users).')
@click.option('-o', '--order', type=str, default=None, metavar='FIELDNAME',
              help='Sort by the defined field name. Names are viewed with the '
              'targets fields subcommand or "-o ?" to interactively '
              'select field for sort')
@click.option('-c', '--companyid', type=str, default=None, metavar='COMPANYID',
              help='Filter the list to just users with the defined companyID. '
              'This field may be selected interactively by entering "?".')
@click.pass_obj
def users_list(context, **options): # pylint: disable=redefined-builtin
    """
    List users in the database users table.
    Lists the information on users in the users table in a table format, one
    user per row. Options allow selecting specific fields of the table (the
    fields in the table can be viewed with the fields subcommand) and ordering
    the ouput with a field name. Unless the --disabled option is set, only
    active users are shown in the output.
    The --companyid option allows selecting only users for a particular company
    for the list.
    The default field list is:
    UserID, FirstName, Lastname, Email, CompanyName, Active, Notify
    Examples:
      smicli users list   # default list of all users
      smicli users list -c ?  # Presents a list of companies for user to
                              # select a company and then lists users for
                              # that company
      smicli users list -f Email -o Email # list with UserId and Email fields
                                          # in output table.
    """
    # Defer the actual listing work to the cmd_users_list action function
    # through the context dispatcher.
    context.execute_cmd(lambda: cmd_users_list(context, options))
@users_group.command('delete', options_metavar=CMD_OPTS_TXT)
@click.argument('UserID', type=str, metavar='UserID', required=False)
@click.option('-n', '--no-verify', is_flag=True, default=False,
              help='Disable verification prompt before the delete is '
              'executed.')
@click.pass_obj
def users_delete(context, userid, **options):
    """
    Delete a user from the database.
    Delete the user defined by the subcommand argument from the
    database.
    The user to be deleted may be specified by a) specific user id, b) using
    '?' as the user id argument which also initiates the interactive mode
    options
    Examples:
      smicli delete 85
      smicli delete ?
    """
    # NOTE(review): the docstring examples omit the 'users' group
    # ('smicli users delete 85') -- confirm and correct the help text.
    # Defer the actual delete work to the cmd_users_delete action function
    # through the context dispatcher.
    context.execute_cmd(lambda: cmd_users_delete(context, userid, options))
@users_group.command('modify', options_metavar=CMD_OPTS_TXT)
@click.argument('UserID', type=str, metavar='UserID', required=False)
@click.option('-f', '--firstname', type=str,
              required=False,
              help='User first name.')
@click.option('-l', '--lastname', type=str,
              default=None,
              required=False,
              help='User last name')
@click.option('-e', '--email', type=str,
              required=False,
              help='User email address.')
@click.option('-c', '--CompanyID', type=int, default=None, required=False,
              help='CompanyID for the company attached to this user')
@click.option('--no_notifications', is_flag=True, default=False,
              help='Disable the notify state in the database for this '
              'user if this flag set.')
@click.option('-n', '--no-verify', is_flag=True, default=False,
              help='Disable verification prompt before the change is '
              'executed.')
@click.pass_obj
def users_modify(context, userid, **options):
    """
    Modify fields of a user in the user database.
    This allows modifications of the fields for a particular specified by
    the user id on input.
    The user to be modified may be specified by a) specific user id, b) the
    interactive mode option, or c) using '?' as the user id argument which also
    initiates the interactive mode options
    ex. smicli users modify 9 -f fred
        # changes the first name of the user with user id 9.
    """
    # BUG FIX (help text): the example used '-n fred', but -n is the
    # no-verify flag; the first-name option is -f/--firstname.
    # Defer the actual modify work to the cmd_users_modify action function
    # through the context dispatcher.
    context.execute_cmd(lambda: cmd_users_modify(context, userid, options))
@users_group.command('activate', options_metavar=CMD_OPTS_TXT)
@click.argument('UserIDs', type=str, metavar='UserIDs',
                required=False, nargs=-1)
@click.option('--active/--inactive', default=False, required=False,
              help='Set the active/inactive state in the database for this '
              'user. Default is to attempt set user to inactive.')
@click.option('-n', '--no-verify', is_flag=True, default=False,
              help='Disable verification prompt before the operation is '
              'executed.')
@click.option('-c', '--companyid', type=str, default=None, metavar='COMPANYID',
              help='Limit the list of users from which to select by the '
              'companyid provided')
@click.pass_obj
def users_activate(context, userids, **options):
    """
    Activate or deactivate multiple users.
    This sets the users defined by the userids argument to either active
    or inactive. When a user is inactive they are no longer shown in
    tables that involve user information such as the weekly report.
    The users to be activated or deactivated may be specified by a) specific
    user ids, b) the interactive mode option, or c) using '?' as the user id
    argument which also initiates the interactive mode options.
    Each user selected activated separately and users already in the target
    state are bypassed. If the --no-verify option is not set each user to be
    changed causes a verification request before the change.
    Examples:
       smicli users activate ? --inactive  # list all users for select and
                                           # deactivate the selected users
       smicli user activate ? --active -c ?  # first creates selection list
                                             # to select company. Then
                                             # creates select list for that
                                             # company and activates the
                                             # selected users.
    """
    # NOTE(review): the second docstring example says 'smicli user
    # activate'; the group is registered as 'users' -- confirm and fix.
    # Defer the actual state change to the cmd_users_activate action
    # function through the context dispatcher.
    context.execute_cmd(lambda: cmd_users_activate(context, userids, options))
@users_group.command('fields', options_metavar=CMD_OPTS_TXT)
@click.pass_obj
def users_fields(context):
    """
    Display field names in users database.
    Example:
      smicli users fields
    """
    # BUG FIX (help text): the example previously read 'smicli users list
    # fields'; this command is registered as 'fields' under the users
    # group, so the correct invocation is 'smicli users fields'.
    # Defer the actual work to the cmd_users_fields action function
    # through the context dispatcher.
    context.execute_cmd(lambda: cmd_users_fields(context))
######################################################################
#
# Action functions
#
######################################################################
def display_cols(users_tbl, fields, show_disabled=True, companyid=None,
                 order=None, output_format=None):
    """
    Display a table of user data limited to the columns named in fields.

    Builds one row per user from ``users_tbl.format_record`` and prints
    the result with ``print_table``.

    Parameters:
      users_tbl: the users table object from the database.

      fields: list of strings defining the users table columns to be
        displayed.

      show_disabled (:class:`py:bool`): if True include users marked
        inactive; otherwise only active user ids are listed.

      companyid: optional company id; when set, rows whose CompanyID does
        not match are skipped.

      order (:term:`string`): None or name of a field on which the table
        is sorted for output.

      output_format: table format passed through to ``print_table``.

    Raises:
      click.ClickException: if ``order`` is not a valid field name.
    """
    if show_disabled:
        user_ids = sorted(users_tbl.keys())
    else:
        user_ids = sorted(users_tbl.get_active_usersids())

    # If order defined check to see if valid field
    if order:
        if order not in users_tbl.all_fields:
            raise click.ClickException("--order option defines invalid field %s"
                                       % order)
        # Stable sort keeps the existing userid ordering for equal
        # order-field values (same result as the previous
        # defaultdict-of-lists approach, with less code).
        user_ids = sorted(user_ids, key=lambda uid: users_tbl[uid][order])

    rows = []
    for userid in user_ids:
        if companyid and users_tbl[userid]['CompanyID'] != companyid:
            continue
        rows.append(users_tbl.format_record(userid, fields))

    headers = users_tbl.tbl_hdr(fields)
    title = 'User Overview: %s:' % datetime_display_str()
    if show_disabled:
        title = '%s includes disabled users' % title
    if companyid:
        # Bug fix: the separator used to be prepended ('; %s for ...'),
        # producing titles that started with '; '.
        title = '%s; for company=%s' % (title, companyid)

    print_table(rows, headers=headers, title=title,
                table_format=output_format)
# Default column set shown by display_table() when the caller does not
# request specific fields.
STANDARD_FIELDS_DISPLAY_LIST = ['UserID', 'FirstName', 'Lastname', 'Email',
                                'CompanyName', 'Active', 'Notify']
def display_table(users_tbl, fields=None, companyid=None, order=None,
                  show_disabled=True, output_format=None):
    """Display users in the table, either all or limited by companyid.

    If no field list is supplied (None or empty), a standard set of
    display columns (STANDARD_FIELDS_DISPLAY_LIST) is used.

    Parameters:
      users_tbl: the users table object from the database.
      fields: optional list of field names to display.
      companyid: optional company id used to filter the rows.
      order: optional field name on which the output is sorted.
      show_disabled (bool): if True include users marked inactive.
      output_format: table output format passed on to display_cols.
    """
    # ``fields or ...`` covers both None and an empty list, matching the
    # previous ``if not fields`` test; the redundant else branch
    # (``fields = fields``) has been dropped.
    fields = fields or STANDARD_FIELDS_DISPLAY_LIST
    display_cols(users_tbl, fields, show_disabled=show_disabled, order=order,
                 companyid=companyid, output_format=output_format)
def _test_active(options):
"""
Test the activate options. This has 3 possible values.
"""
if options['active']:
activate = True
elif options['inactive']:
activate = False
else:
activate = None
return activate
def _test_notify(options):
"""
Test the activate options. This has 3 possible values.
"""
if options['enable']:
notify = True
elif options['disable']:
notify = False
else:
notify = None
return notify
def cmd_users_fields(context):
    """Display the information fields for the providers dictionary."""
    # One single-column row per known field name.
    field_rows = [[name] for name in UsersTable.all_fields]
    context.spinner.stop()
    print_table(field_rows, 'User Fields',
                title='User table fields from database and joins:',
                table_format=context.output_format)
def cmd_users_list(context, options):
    """
    List users from the users table in a flexible format based on the
    options.

    Supports interactive selection of fields, sort order and company
    ('?' as the value), then delegates rendering to display_table.

    Raises:
      click.ClickException: for invalid field names or display errors.
    """
    fields = list(options['fields'])
    users_tbl = UsersTable.factory(context.db_info, context.db_type,
                                   context.verbose)

    # TODO. For now this is hidden capability. Need to make public
    # Entering all as first field name causes all fields to be used.
    if fields and fields[0] == 'all':
        fields = users_tbl.all_fields[:]

    # Build the list of selectable fields.  CompanyID is replaced by
    # CompanyName, which comes from the join with the companies table.
    field_selects = users_tbl.all_fields[:]
    if 'CompanyID' in field_selects:
        field_selects.remove('CompanyID')
    if 'CompanyName' not in field_selects:
        field_selects.append('CompanyName')

    if fields:
        if fields[0] == "?":
            indexes = pick_multiple_from_list(context, field_selects,
                                              "Select fields to report")
            if not indexes:
                click.echo("Abort cmd, no fields selected")
                return
            # Bug fix: the picked indexes refer to field_selects (the
            # list shown to the user), not users_tbl.fields, whose
            # ordering differs once CompanyID is removed and CompanyName
            # appended.
            fields = [field_selects[index] for index in indexes]
            if 'UserID' not in fields:
                fields.insert(0, 'UserID')  # always show UserID

    if 'order' in options and options['order'] == "?":
        index = pick_from_list(context, field_selects, "Select field for order")
        # Bug fix: same index-base correction as for the field selection.
        order = field_selects[index]
    else:
        order = options['order']

    if 'companyid' in options:
        companyid = select_companyid(context, options['companyid'])
    else:
        companyid = options['companyid']

    for field in fields:
        if field not in users_tbl.all_fields:
            raise click.ClickException("Invalid field name: %s" % field)

    context.spinner.stop()
    try:
        display_table(users_tbl, list(fields),
                      show_disabled=options['disabled'], order=order,
                      companyid=companyid,
                      output_format=context.output_format)
    except Exception as ex:
        raise click.ClickException("%s: %s" % (ex.__class__.__name__, ex))
def cmd_users_add(context, options):
    """
    Add a new user to the users table.

    Resolves '?' for companyid interactively, validates the companyid
    against the companies table, optionally asks for confirmation, then
    inserts the user record.
    """
    test_db_updates_allowed()

    users_tbl = UsersTable.factory(context.db_info, context.db_type,
                                   context.verbose)
    companies_tbl = CompaniesTable.factory(context.db_info, context.db_type,
                                           context.verbose)

    first_name = options['firstname']
    last_name = options['lastname']
    email = options['email']
    companyid = options['companyid']
    active = not options['inactive']
    notify = not options['disable']

    if companyid == "?":
        companyid = get_companyid(context, companies_tbl, companyid, None)
        if companyid is None:
            return

    if companyid not in companies_tbl:
        raise click.ClickException('The companyID %s is not a valid companyID '
                                   'in companies table' % companyid)
    company = companies_tbl[companyid]['CompanyName']

    if not options['no_verify']:
        context.spinner.stop()
        click.echo('Adding %s %s in company=%s(%s), email=%s' %
                   (first_name, last_name, company, companyid, email))
        # Guard clause instead of the empty-pass branch.
        if not validate_prompt('Validate add this user?'):
            click.echo('Aborted Operation')
            return

    try:
        users_tbl.insert(first_name, last_name, email, companyid,
                         active=active,
                         notify=notify)
    except MySQLError as ex:
        raise click.ClickException("DB INSERT Error %s: %s" %
                                   (ex.__class__.__name__, ex))
def cmd_users_delete(context, userid, options):
    """Delete a user from the database.

    Refuses to delete a user that is still referenced by any target's
    NotifyUsers list; otherwise optionally prompts for confirmation and
    removes the record.
    """
    test_db_updates_allowed()
    users_tbl = UsersTable.factory(context.db_info, context.db_type,
                                   context.verbose)
    # Resolve '?' / interactive selection into a concrete userid.
    userid = get_userid(context, users_tbl, userid, options)
    if userid is None:
        return
    if userid not in users_tbl:
        raise click.ClickException('The UserID %s is not in the table' %
                                   userid)
    user = users_tbl[userid]
    # Collect every target whose NotifyUsers list references this user;
    # deleting such a user would leave dangling references.
    targets_with_user = []
    for targetid in context.targets_tbl:
        notify_users = context.targets_tbl.get_notifyusers(targetid)
        if notify_users and userid in notify_users:
            targets_with_user.append(context.targets_tbl[targetid])
    if targets_with_user:
        # Report each referencing target before refusing the delete.
        for target in targets_with_user:
            click.echo("User referenced in targets targetID: %s, company: "
                       "%s IPAddress: %s" %
                       (target['TargetID'], target['CompanyName'],
                        target['IPAddress']))
        targets = ['%s;%s' % (target['TargetID'], target['CompanyName'])
                   for target in targets_with_user]
        raise click.ClickException('Cannot delete UserID %s. It is used in '
                                   'Targets.NotifyUsers %s' %
                                   (userid, ", ".join(targets)))
    # TODO we really want to delete the entries if user wants to
    if not options['no_verify']:
        # Show the record and ask for confirmation before deleting.
        context.spinner.stop()
        click.echo('id=%s %s %s; %s' % (userid, user['FirstName'],
                                        user['Lastname'], user['Email']))
        if not validate_prompt('Validate delete this user?'):
            click.echo('Aborted Operation')
            return
    context.spinner.stop()
    try:
        users_tbl.delete(userid)
    except MySQLError as ex:
        # DB failures are reported but not re-raised.
        click.echo("Change failed, Database Error Exception: %s: %s"
                   % (ex.__class__.__name__, ex))
def cmd_users_modify(context, userid, options):
    """Modify selected fields of a user in the database.

    Only the options actually supplied (not None) are applied.  When
    verification is enabled each proposed change is shown and must be
    confirmed before the update is sent.
    """
    test_db_updates_allowed()
    users_tbl = UsersTable.factory(context.db_info, context.db_type,
                                   context.verbose)
    userid = get_userid(context, users_tbl, userid, options)
    if userid is None:
        return

    # Map command options to database field names, keeping only the
    # options actually supplied.  (The previous code deleted keys from
    # the dict while iterating its items(), which raises RuntimeError on
    # Python 3.)
    option_to_field = (('firstname', 'FirstName'),
                       ('lastname', 'Lastname'),
                       ('email', 'Email'),
                       ('companyid', 'CompanyID'))
    changes = {}
    for opt_name, field_name in option_to_field:
        value = options.get(opt_name, None)
        if value is not None:
            changes[field_name] = value

    if not changes:
        click.echo('No changes requested')
        return

    user_record = users_tbl[userid]
    if not options['no_verify']:
        context.spinner.stop()
        click.echo('Proposed changes for id: %s, %s %s, email: %s:' %
                   (userid, user_record['FirstName'],
                    user_record['Lastname'],
                    user_record['Email']))
        for key, value in changes.items():
            click.echo(' %s: "%s" to "%s"' % (key,
                                              user_record[key],
                                              value))
        if not validate_prompt('Modify user id %s' % userid):
            click.echo('Operation aborted by user.')
            return

    context.spinner.stop()
    try:
        users_tbl.update_fields(userid, changes)
    except MySQLError as ex:
        click.echo("Change failed, Database Error Exception: %s: %s"
                   % (ex.__class__.__name__, ex))
        return
def cmd_users_activate(context, userids, options):
    """
    Set or clear the active flag for each of the listed users.

    All requested userids are validated before any change is made.  Users
    already in the requested state are skipped; each remaining change is
    confirmed interactively unless --no-verify was given.
    """
    test_db_updates_allowed()
    users_tbl = UsersTable.factory(context.db_info, context.db_type,
                                   context.verbose)
    userids = get_multiple_user_ids(context, userids, users_tbl, options,
                                    companyid=options['companyid'])
    if userids is None:
        return

    # Validate every requested id before changing any of them.
    for userid in userids:
        try:
            users_tbl[userid]  # noqa: F841
        except Exception as ex:
            # Bug fix: the message previously interpolated the builtin
            # ``id`` function instead of the offending userid.
            raise click.ClickException('Invalid UserId=%s. Not in database. '
                                       '%s: %s' % (userid,
                                                   ex.__class__.__name__, ex))

    context.spinner.stop()
    # The requested target state is constant across the loop.
    active_flag = options['active']
    for userid in userids:
        usr_item = users_tbl[userid]
        is_active = users_tbl.is_active(userid)
        # Skip users already in the requested state.
        if active_flag and is_active:
            click.echo('User %s already active' % userid)
            continue
        if not active_flag and not is_active:
            click.echo('User %s already inactive' % userid)
            continue
        if not options['no_verify']:
            click.echo('Setting %s %s in company=%s(%s), email=%s' %
                       (usr_item['FirstName'], usr_item['Lastname'],
                        usr_item['CompanyName'], usr_item['CompanyID'],
                        usr_item['Email']))
            if not validate_prompt('Validate change this user?'):
                click.echo('Abort this change')
                continue
        try:
            users_tbl.activate(userid, active_flag)
        except MySQLError as ex:
            click.echo('Activate failed, Database Error Exception: '
                       '%s: %s' % (ex.__class__.__name__, ex))
            return
        click.echo('User %s set %s' % (userid,
                                       users_tbl.is_active_str(userid)))
| StarcoderdataPython |
13579 | # Generated by Django 2.1.10 on 2019-07-19 12:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the cms_content app.

    Adds the two halves of a generic relation (content_type FK plus
    object_id) to the Container model, and sets default ordering and
    verbose names on the Element model.
    """
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('cms_content', '0003_auto_20190719_1232'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='element',
            options={'ordering': ['position'], 'verbose_name': 'Element', 'verbose_name_plural': 'Element'},
        ),
        # content_type + object_id together form a generic foreign key.
        migrations.AddField(
            model_name='container',
            name='content_type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType', verbose_name='Content type'),
        ),
        migrations.AddField(
            model_name='container',
            name='object_id',
            field=models.PositiveIntegerField(blank=True, null=True, verbose_name='Object ID'),
        ),
    ]
| StarcoderdataPython |
17122 | <filename>vitrage/tests/unit/datasources/kubernetes/test_kubernetes_transformer.py
# Copyright 2018 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from testtools import matchers
from vitrage.common.constants import DatasourceAction
from vitrage.common.constants import DatasourceOpts as DSOpts
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import EntityCategory
from vitrage.common.constants import GraphAction
from vitrage.common.constants import UpdateMethod
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.kubernetes.properties import KUBERNETES_DATASOURCE
from vitrage.datasources.kubernetes.properties import KubernetesProperties \
as kubProp
from vitrage.datasources.kubernetes.transformer import KubernetesTransformer
from vitrage.datasources.nova.instance import NOVA_INSTANCE_DATASOURCE
from vitrage.datasources.nova.instance.transformer import InstanceTransformer
from vitrage.datasources import transformer_base as tbase
from vitrage.datasources.transformer_base import TransformerBase
from vitrage.tests import base
from vitrage.tests.mocks import mock_driver as mock_sync
from vitrage.tests.mocks import utils
LOG = logging.getLogger(__name__)
cluster_name = 'kubernetes'
class KubernetesTransformerTest(base.BaseTest):
    """Unit tests for the kubernetes datasource transformer.

    The transformer turns kubernetes datasource events into a cluster
    vertex plus neighbor placeholder vertices for the nova instances
    that back the cluster nodes.
    """
    OPTS = [
        cfg.StrOpt(DSOpts.UPDATE_METHOD,
                   default=UpdateMethod.PULL),
        cfg.StrOpt(DSOpts.CONFIG_FILE,
                   default=utils.get_resources_dir() +
                   '/kubernetes/kubernetes_config.yaml'),
    ]
    # noinspection PyAttributeOutsideInit,PyPep8Naming
    @classmethod
    def setUpClass(cls):
        """Build the transformer registry shared by all tests.

        The kubernetes transformer needs the nova instance transformer in
        the same registry to create neighbor placeholder vertices.
        """
        super(KubernetesTransformerTest, cls).setUpClass()
        cls.transformers = {}
        cls.transformers[KUBERNETES_DATASOURCE] = KubernetesTransformer(
            cls.transformers)
        cls.transformers[NOVA_INSTANCE_DATASOURCE] = \
            InstanceTransformer(cls.transformers)
    def setUp(self):
        """Re-register the kubernetes config options for each test."""
        super(KubernetesTransformerTest, self).setUp()
        self.conf_reregister_opts(self.OPTS, group=KUBERNETES_DATASOURCE)
    def test_snapshot_event_transform(self):
        """Transform snapshot and init-snapshot events into wrappers."""
        LOG.debug('Test tactual transform action for '
                  'snapshot and snapshot init events')
        k8s_spec_list = \
            mock_sync.simple_k8s_nodes_generators(nodes_num=2,
                                                  snapshot_events=1)
        nodes_events = mock_sync.generate_random_events_list(k8s_spec_list)
        for event in nodes_events:
            k8s_wrapper = self.transformers[KUBERNETES_DATASOURCE].transform(
                event)
            # Test assertions
            self.assertEqual(cluster_name, k8s_wrapper.vertex[VProps.NAME])
            n_length = str(len(k8s_wrapper.neighbors))
            self.assertThat(n_length, matchers.HasLength(1),
                            'Cluster vertex has one neighbor')
            self._validate_cluster_neighbors(k8s_wrapper.neighbors, event)
            # INIT_SNAPSHOT events create the entity; later SNAPSHOT
            # events update it.
            datasource_action = event[DSProps.DATASOURCE_ACTION]
            if datasource_action == DatasourceAction.INIT_SNAPSHOT:
                self.assertEqual(GraphAction.CREATE_ENTITY, k8s_wrapper.action)
            elif datasource_action == DatasourceAction.SNAPSHOT:
                self.assertEqual(GraphAction.UPDATE_ENTITY, k8s_wrapper.action)
    def test_build_cluster_key(self):
        """Key values for the cluster build the expected lookup key."""
        LOG.debug('Test build cluster key')
        # Test setup
        expected_key = 'RESOURCE:kubernetes:kubernetes'
        instance_transformer = self.transformers[NOVA_INSTANCE_DATASOURCE]
        # Test action
        key_fields = instance_transformer._key_values(
            KUBERNETES_DATASOURCE,
            cluster_name)
        # Test assertions
        observed_key = tbase.build_key(key_fields)
        self.assertEqual(expected_key, observed_key)
    def _validate_cluster_neighbors(self, neighbor, event):
        """Check the single neighbor's vertex and edge against the event."""
        # Create expected neighbor
        time = event[DSProps.SAMPLE_DATE]
        external_id = event['resources'][0][kubProp.EXTERNALID]
        properties = {
            VProps.ID: external_id,
            VProps.VITRAGE_TYPE: NOVA_INSTANCE_DATASOURCE,
            VProps.VITRAGE_CATEGORY: EntityCategory.RESOURCE,
            VProps.VITRAGE_SAMPLE_TIMESTAMP: time
        }
        nova_instance_tran = self.transformers[NOVA_INSTANCE_DATASOURCE]
        expected_neighbor = \
            nova_instance_tran.create_neighbor_placeholder_vertex(**properties)
        self.assertEqual(expected_neighbor, neighbor[0].vertex)
        # Validate neighbor edge
        edge = neighbor[0].edge
        entity_key = \
            self.transformers[KUBERNETES_DATASOURCE]._create_entity_key(event)
        entity_uuid = \
            TransformerBase.uuid_from_deprecated_vitrage_id(entity_key)
        # Edge points from the cluster entity to the placeholder vertex.
        self.assertEqual(edge.source_id, entity_uuid)
        self.assertEqual(edge.target_id, neighbor[0].vertex.vertex_id)
    def test_create_entity_key(self):
        """Entity keys have CATEGORY:datasource:cluster structure."""
        LOG.debug('Test get key from kubernetes transformer')
        # Test setup
        spec_list = mock_sync.simple_k8s_nodes_generators(nodes_num=1,
                                                          snapshot_events=1)
        nodes_events = mock_sync.generate_random_events_list(spec_list)
        kubernetes_transformer = self.transformers[KUBERNETES_DATASOURCE]
        for event in nodes_events:
            # Test action
            observed_key = kubernetes_transformer._create_entity_key(event)
            # Test assertions
            observed_key_fields = observed_key.split(
                TransformerBase.KEY_SEPARATOR)
            self.assertEqual(EntityCategory.RESOURCE, observed_key_fields[0])
            self.assertEqual(
                KUBERNETES_DATASOURCE,
                observed_key_fields[1]
            )
            key_values = kubernetes_transformer._key_values(
                KUBERNETES_DATASOURCE,
                cluster_name)
            expected_key = tbase.build_key(key_values)
            self.assertEqual(expected_key, observed_key)
| StarcoderdataPython |
1648994 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/02/25 16:30
# @Author : niuliangtao
# @Site :
# @File : MachineLearninginAction.py
# @Software: PyCharm
import re
import urllib
import requests
from bs4 import BeautifulSoup
github_root = "https://github.com"
github_raw = "https://raw.githubusercontent.com"
s1 = "https://raw.githubusercontent.com/apachecn/MachineLearning/python-2.7/LICENSE"
s2 = "https://raw.githubusercontent.com/apachecn/MachineLearning/blob/python-2.7/LICENSE"
def downLoadFile(url, title):
    """Download one repository file from the GitHub raw-content host.

    url: repository path taken from the HTML file listing; may contain
        the '/blob' web-view segment, which raw.githubusercontent.com
        does not use.
    title: local file name to write the downloaded bytes to.
    """
    # raw.githubusercontent.com serves the same path without '/blob'.
    url = url.replace("/blob", "")
    raw_url = github_raw + url
    print("download file FROM \t" + raw_url + "\t to \t" + title)
    # The raw host returns the file bytes directly for every file type,
    # so images and text no longer need separate handling.  The old text
    # branch parsed the plain-text response as HTML and kept only the
    # first <p> element, which discarded most of the file content.
    response = requests.get(raw_url)
    with open(title, 'wb') as f:
        f.write(response.content)
def getPath(url):
    # Fetch the GitHub repository file-listing page and walk each row of
    # the files table, printing directories and downloading plain files.
    # NOTE(review): decoding the page as GBK looks odd for github.com
    # (normally UTF-8) -- presumably copied from another scraper; verify.
    req = requests.get(url)
    soup = BeautifulSoup(req.content.decode('gbk', 'ignore'), 'lxml')
    # soup = BeautifulSoup(req.content, 'lxml')
    tables = soup.find("table", "files js-navigation-container js-active-navigation-container")
    for file_wrap in tables.find_all("tr", "js-navigation-item"):
        context = file_wrap.find("a", "js-navigation-open")
        path = context.attrs["href"]
        title = context.text
        print (path + "\t" + title)
        # Directory rows carry the 'octicon-file-directory' icon class;
        # plain file rows carry 'octicon-file'.  Only files are fetched;
        # directories are not descended into.
        if 'octicon-file-directory' in file_wrap.find("td", "icon").find("svg").attrs["class"]:
            print "directory"
        elif 'octicon-file' in file_wrap.find("svg").attrs["class"]:
            print "file"
            downLoadFile(path, title)
if __name__ == '__main__':
    # Entry point: crawl the root file listing of the repository.
    url = "https://github.com/apachecn/MachineLearning"
    getPath(url=url)
| StarcoderdataPython |
3356042 | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import troposphere as t
import yaml
from awacs import iam as awscs_iam
from troposphere import codebuild
from troposphere import codecommit
from troposphere import codepipeline
from troposphere import iam
from troposphere import s3
from troposphere import sns
from troposphere import sqs
from troposphere import ssm
from servicecatalog_puppet import constants
def get_template(
puppet_version,
all_regions,
source,
is_caching_enabled,
is_manual_approvals: bool,
scm_skip_creation_of_repo: bool,
should_validate: bool,
) -> t.Template:
is_codecommit = source.get("Provider", "").lower() == "codecommit"
is_github = source.get("Provider", "").lower() == "github"
is_codestarsourceconnection = (
source.get("Provider", "").lower() == "codestarsourceconnection"
)
is_custom = source.get("Provider", "").lower() == "custom"
is_s3 = source.get("Provider", "").lower() == "s3"
description = f"""Bootstrap template used to bring up the main ServiceCatalog-Puppet AWS CodePipeline with dependencies
{{"version": "{puppet_version}", "framework": "servicecatalog-puppet", "role": "bootstrap-master"}}"""
template = t.Template(Description=description)
version_parameter = template.add_parameter(
t.Parameter("Version", Default=puppet_version, Type="String")
)
org_iam_role_arn_parameter = template.add_parameter(
t.Parameter("OrgIamRoleArn", Default="None", Type="String")
)
with_manual_approvals_parameter = template.add_parameter(
t.Parameter(
"WithManualApprovals",
Type="String",
AllowedValues=["Yes", "No"],
Default="No",
)
)
puppet_code_pipeline_role_permission_boundary_parameter = template.add_parameter(
t.Parameter(
"PuppetCodePipelineRolePermissionBoundary",
Type="String",
Description="IAM Permission Boundary to apply to the PuppetCodePipelineRole",
Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
)
)
source_role_permissions_boundary_parameter = template.add_parameter(
t.Parameter(
"SourceRolePermissionsBoundary",
Type="String",
Description="IAM Permission Boundary to apply to the SourceRole",
Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
)
)
puppet_generate_role_permission_boundary_parameter = template.add_parameter(
t.Parameter(
"PuppetGenerateRolePermissionBoundary",
Type="String",
Description="IAM Permission Boundary to apply to the PuppetGenerateRole",
Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
)
)
puppet_deploy_role_permission_boundary_parameter = template.add_parameter(
t.Parameter(
"PuppetDeployRolePermissionBoundary",
Type="String",
Description="IAM Permission Boundary to apply to the PuppetDeployRole",
Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
)
)
puppet_provisioning_role_permissions_boundary_parameter = template.add_parameter(
t.Parameter(
"PuppetProvisioningRolePermissionsBoundary",
Type="String",
Description="IAM Permission Boundary to apply to the PuppetProvisioningRole",
Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
)
)
cloud_formation_deploy_role_permissions_boundary_parameter = template.add_parameter(
t.Parameter(
"CloudFormationDeployRolePermissionsBoundary",
Type="String",
Description="IAM Permission Boundary to apply to the CloudFormationDeployRole",
Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
)
)
deploy_environment_compute_type_parameter = template.add_parameter(
t.Parameter(
"DeployEnvironmentComputeType",
Type="String",
Description="The AWS CodeBuild Environment Compute Type",
Default="BUILD_GENERAL1_SMALL",
)
)
spoke_deploy_environment_compute_type_parameter = template.add_parameter(
t.Parameter(
"SpokeDeployEnvironmentComputeType",
Type="String",
Description="The AWS CodeBuild Environment Compute Type for spoke execution mode",
Default="BUILD_GENERAL1_SMALL",
)
)
deploy_num_workers_parameter = template.add_parameter(
t.Parameter(
"DeployNumWorkers",
Type="Number",
Description="Number of workers that should be used when running a deploy",
Default=10,
)
)
puppet_role_name_parameter = template.add_parameter(
t.Parameter("PuppetRoleName", Type="String", Default="PuppetRole")
)
puppet_role_path_template_parameter = template.add_parameter(
t.Parameter("PuppetRolePath", Type="String", Default="/servicecatalog-puppet/")
)
template.add_condition(
"ShouldUseOrgs", t.Not(t.Equals(t.Ref(org_iam_role_arn_parameter), "None"))
)
template.add_condition(
"HasManualApprovals", t.Equals(t.Ref(with_manual_approvals_parameter), "Yes")
)
template.add_resource(
s3.Bucket(
"StacksRepository",
BucketName=t.Sub("sc-puppet-stacks-repository-${AWS::AccountId}"),
VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
BucketEncryption=s3.BucketEncryption(
ServerSideEncryptionConfiguration=[
s3.ServerSideEncryptionRule(
ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
SSEAlgorithm="AES256"
)
)
]
),
PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
BlockPublicAcls=True,
BlockPublicPolicy=True,
IgnorePublicAcls=True,
RestrictPublicBuckets=True,
),
Tags=t.Tags({"ServiceCatalogPuppet:Actor": "Framework"}),
)
)
manual_approvals_param = template.add_resource(
ssm.Parameter(
"ManualApprovalsParam",
Type="String",
Name="/servicecatalog-puppet/manual-approvals",
Value=t.Ref(with_manual_approvals_parameter),
)
)
template.add_resource(
ssm.Parameter(
"SpokeDeployEnvParameter",
Type="String",
Name=constants.SPOKE_EXECUTION_MODE_DEPLOY_ENV_PARAMETER_NAME,
Value=t.Ref(spoke_deploy_environment_compute_type_parameter),
)
)
param = template.add_resource(
ssm.Parameter(
"Param",
Type="String",
Name="service-catalog-puppet-version",
Value=t.Ref(version_parameter),
)
)
partition_parameter = template.add_resource(
ssm.Parameter(
"PartitionParameter",
Type="String",
Name="/servicecatalog-puppet/partition",
Value=t.Ref("AWS::Partition"),
)
)
puppet_role_name_parameter = template.add_resource(
ssm.Parameter(
"PuppetRoleNameParameter",
Type="String",
Name="/servicecatalog-puppet/puppet-role/name",
Value=t.Ref(puppet_role_name_parameter),
)
)
puppet_role_path_parameter = template.add_resource(
ssm.Parameter(
"PuppetRolePathParameter",
Type="String",
Name="/servicecatalog-puppet/puppet-role/path",
Value=t.Ref(puppet_role_path_template_parameter),
)
)
share_accept_function_role = template.add_resource(
iam.Role(
"ShareAcceptFunctionRole",
RoleName="ShareAcceptFunctionRole",
ManagedPolicyArns=[
t.Sub(
"arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
)
],
Path=t.Ref(puppet_role_path_template_parameter),
Policies=[
iam.Policy(
PolicyName="ServiceCatalogActions",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Resource": {
"Fn::Sub": "arn:${AWS::Partition}:iam::*:role${PuppetRolePath}${PuppetRoleName}"
},
"Effect": "Allow",
}
],
},
)
],
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"Service": ["lambda.amazonaws.com"]},
}
],
},
)
)
provisioning_role = template.add_resource(
iam.Role(
"ProvisioningRole",
RoleName="PuppetProvisioningRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"Service": ["codebuild.amazonaws.com"]},
},
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"AWS": {"Fn::Sub": "${AWS::AccountId}"}},
},
],
},
ManagedPolicyArns=[
t.Sub("arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess")
],
PermissionsBoundary=t.Ref(
puppet_provisioning_role_permissions_boundary_parameter
),
Path=t.Ref(puppet_role_path_template_parameter),
)
)
cloud_formation_deploy_role = template.add_resource(
iam.Role(
"CloudFormationDeployRole",
RoleName="CloudFormationDeployRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"Service": ["cloudformation.amazonaws.com"]},
},
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"AWS": {"Fn::Sub": "${AWS::AccountId}"}},
},
],
},
ManagedPolicyArns=[
t.Sub("arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess")
],
PermissionsBoundary=t.Ref(
cloud_formation_deploy_role_permissions_boundary_parameter
),
Path=t.Ref(puppet_role_path_template_parameter),
)
)
pipeline_role = template.add_resource(
iam.Role(
"PipelineRole",
RoleName="PuppetCodePipelineRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"Service": ["codepipeline.amazonaws.com"]},
}
],
},
ManagedPolicyArns=[
t.Sub("arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess")
],
PermissionsBoundary=t.Ref(
puppet_code_pipeline_role_permission_boundary_parameter
),
Path=t.Ref(puppet_role_path_template_parameter),
)
)
source_role = template.add_resource(
iam.Role(
"SourceRole",
RoleName="PuppetSourceRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"Service": ["codepipeline.amazonaws.com"]},
},
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {
"AWS": {
"Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:root"
}
},
},
],
},
ManagedPolicyArns=[
t.Sub("arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess")
],
PermissionsBoundary=t.Ref(source_role_permissions_boundary_parameter),
Path=t.Ref(puppet_role_path_template_parameter),
)
)
dry_run_notification_topic = template.add_resource(
sns.Topic(
"DryRunNotificationTopic",
DisplayName="service-catalog-puppet-dry-run-approvals",
TopicName="service-catalog-puppet-dry-run-approvals",
Condition="HasManualApprovals",
)
)
deploy_role = template.add_resource(
iam.Role(
"DeployRole",
RoleName="PuppetDeployRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {"Service": ["codebuild.amazonaws.com"]},
}
],
},
ManagedPolicyArns=[
t.Sub("arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess")
],
PermissionsBoundary=t.Ref(puppet_deploy_role_permission_boundary_parameter),
Path=t.Ref(puppet_role_path_template_parameter),
)
)
num_workers_ssm_parameter = template.add_resource(
ssm.Parameter(
"NumWorkersSSMParameter",
Type="String",
Name="/servicecatalog-puppet/deploy/num-workers",
Value=t.Sub("${DeployNumWorkers}"),
)
)
parameterised_source_bucket = template.add_resource(
s3.Bucket(
"ParameterisedSourceBucket",
PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
IgnorePublicAcls=True,
BlockPublicPolicy=True,
BlockPublicAcls=True,
RestrictPublicBuckets=True,
),
BucketEncryption=s3.BucketEncryption(
ServerSideEncryptionConfiguration=[
s3.ServerSideEncryptionRule(
ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
SSEAlgorithm="AES256"
)
)
]
),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
BucketName=t.Sub("sc-puppet-parameterised-runs-${AWS::AccountId}"),
VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
)
)
source_stage = codepipeline.Stages(
Name="Source",
Actions=[
codepipeline.Actions(
RunOrder=1,
RoleArn=t.GetAtt("SourceRole", "Arn"),
ActionTypeId=codepipeline.ActionTypeId(
Category="Source", Owner="AWS", Version="1", Provider="S3",
),
OutputArtifacts=[
codepipeline.OutputArtifacts(Name="ParameterisedSource")
],
Configuration={
"S3Bucket": t.Ref(parameterised_source_bucket),
"S3ObjectKey": "parameters.zip",
"PollForSourceChanges": True,
},
Name="ParameterisedSource",
)
],
)
install_spec = {
"runtime-versions": dict(python="3.7"),
"commands": [
f"pip install {puppet_version}"
if "http" in puppet_version
else f"pip install aws-service-catalog-puppet=={puppet_version}",
],
}
deploy_env_vars = [
{
"Type": "PLAINTEXT",
"Name": "PUPPET_ACCOUNT_ID",
"Value": t.Ref("AWS::AccountId"),
},
{"Type": "PLAINTEXT", "Name": "PUPPET_REGION", "Value": t.Ref("AWS::Region"),},
{
"Type": "PARAMETER_STORE",
"Name": "PARTITION",
"Value": t.Ref(partition_parameter),
},
{
"Type": "PARAMETER_STORE",
"Name": "PUPPET_ROLE_NAME",
"Value": t.Ref(puppet_role_name_parameter),
},
{
"Type": "PARAMETER_STORE",
"Name": "PUPPET_ROLE_PATH",
"Value": t.Ref(puppet_role_path_parameter),
},
]
if is_codecommit:
template.add_resource(
codecommit.Repository(
"CodeRepo",
RepositoryName=source.get("Configuration").get("RepositoryName"),
RepositoryDescription="Repo to store the servicecatalog puppet solution",
DeletionPolicy="Retain",
)
)
source_stage.Actions.append(
codepipeline.Actions(
RunOrder=1,
RoleArn=t.GetAtt("SourceRole", "Arn"),
ActionTypeId=codepipeline.ActionTypeId(
Category="Source", Owner="AWS", Version="1", Provider="CodeCommit",
),
OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
Configuration={
"RepositoryName": source.get("Configuration").get("RepositoryName"),
"BranchName": source.get("Configuration").get("BranchName"),
"PollForSourceChanges": source.get("Configuration").get(
"PollForSourceChanges", True
),
},
Name="Source",
)
)
if is_github:
source_stage.Actions.append(
codepipeline.Actions(
RunOrder=1,
ActionTypeId=codepipeline.ActionTypeId(
Category="Source",
Owner="ThirdParty",
Version="1",
Provider="GitHub",
),
OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
Configuration={
"Owner": source.get("Configuration").get("Owner"),
"Repo": source.get("Configuration").get("Repo"),
"Branch": source.get("Configuration").get("Branch"),
"OAuthToken": t.Join(
"",
[
"{{resolve:secretsmanager:",
source.get("Configuration").get("SecretsManagerSecret"),
":SecretString:OAuthToken}}",
],
),
"PollForSourceChanges": source.get("Configuration").get(
"PollForSourceChanges"
),
},
Name="Source",
)
)
if is_custom:
source_stage.Actions.append(
codepipeline.Actions(
RunOrder=1,
ActionTypeId=codepipeline.ActionTypeId(
Category="Source",
Owner="Custom",
Version=source.get("Configuration").get("CustomActionTypeVersion"),
Provider=source.get("Configuration").get(
"CustomActionTypeProvider"
),
),
OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
Configuration={
"GitUrl": source.get("Configuration").get("GitUrl"),
"Branch": source.get("Configuration").get("Branch"),
"PipelineName": t.Sub("${AWS::StackName}-pipeline"),
},
Name="Source",
)
)
webhook = codepipeline.Webhook(
"Webhook",
Authentication="IP",
TargetAction="Source",
AuthenticationConfiguration=codepipeline.WebhookAuthConfiguration(
AllowedIPRange=source.get("Configuration").get("GitWebHookIpAddress")
),
Filters=[
codepipeline.WebhookFilterRule(
JsonPath="$.changes[0].ref.id", MatchEquals="refs/heads/{Branch}"
)
],
TargetPipelineVersion=1,
TargetPipeline=t.Sub("${AWS::StackName}-pipeline"),
)
template.add_resource(webhook)
values_for_sub = {
"GitUrl": source.get("Configuration").get("GitUrl"),
"WebhookUrl": t.GetAtt(webhook, "Url"),
}
output_to_add = t.Output("WebhookUrl")
output_to_add.Value = t.Sub("${GitUrl}||${WebhookUrl}", **values_for_sub)
output_to_add.Export = t.Export(t.Sub("${AWS::StackName}-pipeline"))
template.add_output(output_to_add)
if is_codestarsourceconnection:
source_stage.Actions.append(
codepipeline.Actions(
RunOrder=1,
RoleArn=t.GetAtt("SourceRole", "Arn"),
ActionTypeId=codepipeline.ActionTypeId(
Category="Source",
Owner="AWS",
Version="1",
Provider="CodeStarSourceConnection",
),
OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
Configuration={
"ConnectionArn": source.get("Configuration").get("ConnectionArn"),
"FullRepositoryId": source.get("Configuration").get(
"FullRepositoryId"
),
"BranchName": source.get("Configuration").get("BranchName"),
"OutputArtifactFormat": source.get("Configuration").get(
"OutputArtifactFormat"
),
},
Name="Source",
)
)
if is_s3:
bucket_name = source.get("Configuration").get("S3Bucket")
if not scm_skip_creation_of_repo:
template.add_resource(
s3.Bucket(
bucket_name,
PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
IgnorePublicAcls=True,
BlockPublicPolicy=True,
BlockPublicAcls=True,
RestrictPublicBuckets=True,
),
BucketEncryption=s3.BucketEncryption(
ServerSideEncryptionConfiguration=[
s3.ServerSideEncryptionRule(
ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
SSEAlgorithm="AES256"
)
)
]
),
Tags=t.Tags.from_dict(
**{"ServiceCatalogPuppet:Actor": "Framework"}
),
BucketName=bucket_name,
VersioningConfiguration=s3.VersioningConfiguration(
Status="Enabled"
),
)
)
source_stage.Actions.append(
codepipeline.Actions(
RunOrder=1,
ActionTypeId=codepipeline.ActionTypeId(
Category="Source", Owner="AWS", Version="1", Provider="S3",
),
OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
Configuration={
"S3Bucket": bucket_name,
"S3ObjectKey": source.get("Configuration").get("S3ObjectKey"),
"PollForSourceChanges": source.get("Configuration").get(
"PollForSourceChanges"
),
},
Name="Source",
)
)
single_account_run_project_build_spec = dict(
version=0.2,
phases=dict(
install=install_spec,
build={
"commands": [
'echo "single_account: \\"${SINGLE_ACCOUNT_ID}\\"" > parameters.yaml',
"cat parameters.yaml",
"zip parameters.zip parameters.yaml",
"aws s3 cp parameters.zip s3://sc-puppet-parameterised-runs-${PUPPET_ACCOUNT_ID}/parameters.zip",
]
},
post_build={
"commands": [
"servicecatalog-puppet wait-for-parameterised-run-to-complete",
]
},
),
artifacts=dict(
name="DeployProject",
files=[
"ServiceCatalogPuppet/manifest.yaml",
"ServiceCatalogPuppet/manifest-expanded.yaml",
"results/*/*",
"output/*/*",
"exploded_results/*/*",
"tasks.log",
],
),
)
single_account_run_project_args = dict(
Name="servicecatalog-puppet-single-account-run",
Description="Runs puppet for a single account - SINGLE_ACCOUNT_ID",
ServiceRole=t.GetAtt(deploy_role, "Arn"),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS",),
TimeoutInMinutes=480,
Environment=codebuild.Environment(
ComputeType=t.Ref(deploy_environment_compute_type_parameter),
Image="aws/codebuild/standard:4.0",
Type="LINUX_CONTAINER",
EnvironmentVariables=[
{
"Type": "PLAINTEXT",
"Name": "SINGLE_ACCOUNT_ID",
"Value": "CHANGE_ME",
},
]
+ deploy_env_vars,
),
Source=codebuild.Source(
Type="NO_SOURCE",
BuildSpec=yaml.safe_dump(single_account_run_project_build_spec),
),
)
single_account_run_project = template.add_resource(
codebuild.Project("SingleAccountRunProject", **single_account_run_project_args)
)
single_account_run_project_build_spec["phases"]["post_build"]["commands"] = [
"servicecatalog-puppet wait-for-parameterised-run-to-complete --on-complete-url $CALLBACK_URL"
]
single_account_run_project_args[
"Name"
] = "servicecatalog-puppet-single-account-run-with-callback"
single_account_run_project_args[
"Description"
] = "Runs puppet for a single account - SINGLE_ACCOUNT_ID and then does a http put"
single_account_run_project_args.get("Environment").EnvironmentVariables.append(
{"Type": "PLAINTEXT", "Name": "CALLBACK_URL", "Value": "CHANGE_ME",}
)
single_account_run_project_args["Source"] = codebuild.Source(
Type="NO_SOURCE",
BuildSpec=yaml.safe_dump(single_account_run_project_build_spec),
)
single_account_run_project_with_callback = template.add_resource(
codebuild.Project(
"SingleAccountRunWithCallbackProject", **single_account_run_project_args
)
)
stages = [source_stage]
if should_validate:
template.add_resource(
codebuild.Project(
"ValidateProject",
Name="servicecatalog-puppet-validate",
ServiceRole=t.GetAtt("DeployRole", "Arn"),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
Artifacts=codebuild.Artifacts(Type="CODEPIPELINE"),
TimeoutInMinutes=60,
Environment=codebuild.Environment(
ComputeType="BUILD_GENERAL1_SMALL",
Image="aws/codebuild/standard:4.0",
Type="LINUX_CONTAINER",
),
Source=codebuild.Source(
BuildSpec=yaml.safe_dump(
dict(
version="0.2",
phases={
"install": {
"runtime-versions": {"python": "3.7",},
"commands": [
f"pip install {puppet_version}"
if "http" in puppet_version
else f"pip install aws-service-catalog-puppet=={puppet_version}",
],
},
"build": {
"commands": [
"servicecatalog-puppet validate manifest.yaml"
]
},
},
)
),
Type="CODEPIPELINE",
),
Description="Validate the manifest.yaml file",
)
)
stages.append(
codepipeline.Stages(
Name="Validate",
Actions=[
codepipeline.Actions(
InputArtifacts=[codepipeline.InputArtifacts(Name="Source"),],
Name="Validate",
ActionTypeId=codepipeline.ActionTypeId(
Category="Build",
Owner="AWS",
Version="1",
Provider="CodeBuild",
),
OutputArtifacts=[
codepipeline.OutputArtifacts(Name="ValidateProject")
],
Configuration={
"ProjectName": t.Ref("ValidateProject"),
"PrimarySource": "Source",
},
RunOrder=1,
),
],
)
)
if is_manual_approvals:
deploy_stage = codepipeline.Stages(
Name="Deploy",
Actions=[
codepipeline.Actions(
InputArtifacts=[
codepipeline.InputArtifacts(Name="Source"),
codepipeline.InputArtifacts(Name="ParameterisedSource"),
],
Name="DryRun",
ActionTypeId=codepipeline.ActionTypeId(
Category="Build",
Owner="AWS",
Version="1",
Provider="CodeBuild",
),
OutputArtifacts=[
codepipeline.OutputArtifacts(Name="DryRunProject")
],
Configuration={
"ProjectName": t.Ref("DryRunProject"),
"PrimarySource": "Source",
},
RunOrder=1,
),
codepipeline.Actions(
ActionTypeId=codepipeline.ActionTypeId(
Category="Approval",
Owner="AWS",
Version="1",
Provider="Manual",
),
Configuration={
"NotificationArn": t.Ref("DryRunNotificationTopic"),
"CustomData": "Approve when you are happy with the dry run.",
},
Name="DryRunApproval",
RunOrder=2,
),
codepipeline.Actions(
InputArtifacts=[
codepipeline.InputArtifacts(Name="Source"),
codepipeline.InputArtifacts(Name="ParameterisedSource"),
],
Name="Deploy",
ActionTypeId=codepipeline.ActionTypeId(
Category="Build",
Owner="AWS",
Version="1",
Provider="CodeBuild",
),
OutputArtifacts=[
codepipeline.OutputArtifacts(Name="DeployProject")
],
Configuration={
"ProjectName": t.Ref("DeployProject"),
"PrimarySource": "Source",
},
RunOrder=3,
),
],
)
else:
deploy_stage = codepipeline.Stages(
Name="Deploy",
Actions=[
codepipeline.Actions(
InputArtifacts=[
codepipeline.InputArtifacts(Name="Source"),
codepipeline.InputArtifacts(Name="ParameterisedSource"),
],
Name="Deploy",
ActionTypeId=codepipeline.ActionTypeId(
Category="Build",
Owner="AWS",
Version="1",
Provider="CodeBuild",
),
OutputArtifacts=[
codepipeline.OutputArtifacts(Name="DeployProject")
],
Configuration={
"ProjectName": t.Ref("DeployProject"),
"PrimarySource": "Source",
"EnvironmentVariables": '[{"name":"EXECUTION_ID","value":"#{codepipeline.PipelineExecutionId}","type":"PLAINTEXT"}]',
},
RunOrder=1,
),
],
)
stages.append(deploy_stage)
pipeline = template.add_resource(
codepipeline.Pipeline(
"Pipeline",
RoleArn=t.GetAtt("PipelineRole", "Arn"),
Stages=stages,
Name=t.Sub("${AWS::StackName}-pipeline"),
ArtifactStore=codepipeline.ArtifactStore(
Type="S3",
Location=t.Sub(
"sc-puppet-pipeline-artifacts-${AWS::AccountId}-${AWS::Region}"
),
),
RestartExecutionOnUpdate=True,
)
)
if is_github:
template.add_resource(
codepipeline.Webhook(
"Webhook",
AuthenticationConfiguration=codepipeline.WebhookAuthConfiguration(
SecretToken=t.Join(
"",
[
"{{resolve:secretsmanager:",
source.get("Configuration").get("SecretsManagerSecret"),
":SecretString:SecretToken}}",
],
)
),
Filters=[
codepipeline.WebhookFilterRule(
JsonPath="$.ref",
MatchEquals="refs/heads/"
+ source.get("Configuration").get("Branch"),
)
],
Authentication="GITHUB_HMAC",
TargetPipeline=t.Ref(pipeline),
TargetAction="Source",
Name=t.Sub("${AWS::StackName}-webhook"),
TargetPipelineVersion=t.GetAtt(pipeline, "Version"),
RegisterWithThirdParty="true",
)
)
deploy_project_build_spec = dict(
version=0.2,
phases=dict(
install={
"runtime-versions": dict(python="3.7"),
"commands": [
f"pip install {puppet_version}"
if "http" in puppet_version
else f"pip install aws-service-catalog-puppet=={puppet_version}",
],
},
pre_build={
"commands": [
"servicecatalog-puppet --info expand --parameter-override-file $CODEBUILD_SRC_DIR_ParameterisedSource/parameters.yaml manifest.yaml",
]
},
build={
"commands": [
"servicecatalog-puppet --info deploy --num-workers ${NUM_WORKERS} manifest-expanded.yaml",
]
},
),
artifacts=dict(
name="DeployProject",
files=[
"manifest-expanded.yaml",
"results/*/*",
"output/*/*",
"exploded_results/*/*",
"tasks.log",
],
),
)
deploy_project_args = dict(
Name="servicecatalog-puppet-deploy",
ServiceRole=t.GetAtt(deploy_role, "Arn"),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
Artifacts=codebuild.Artifacts(Type="CODEPIPELINE",),
TimeoutInMinutes=480,
Environment=codebuild.Environment(
ComputeType=t.Ref(deploy_environment_compute_type_parameter),
Image="aws/codebuild/standard:4.0",
Type="LINUX_CONTAINER",
EnvironmentVariables=[
{
"Type": "PARAMETER_STORE",
"Name": "NUM_WORKERS",
"Value": t.Ref(num_workers_ssm_parameter),
},
{
"Type": "PARAMETER_STORE",
"Name": "SPOKE_EXECUTION_MODE_DEPLOY_ENV",
"Value": constants.SPOKE_EXECUTION_MODE_DEPLOY_ENV_PARAMETER_NAME,
},
]
+ deploy_env_vars,
),
Source=codebuild.Source(
Type="CODEPIPELINE", BuildSpec=yaml.safe_dump(deploy_project_build_spec),
),
Description="deploys out the products to be deployed",
)
deploy_project = template.add_resource(
codebuild.Project("DeployProject", **deploy_project_args)
)
deploy_project_build_spec["phases"]["build"]["commands"] = [
"servicecatalog-puppet --info dry-run manifest-expanded.yaml"
]
deploy_project_build_spec["artifacts"]["name"] = "DryRunProject"
deploy_project_args["Name"] = "servicecatalog-puppet-dryrun"
deploy_project_args["Description"] = "dry run of servicecatalog-puppet-dryrun"
deploy_project_args["Source"] = codebuild.Source(
Type="CODEPIPELINE", BuildSpec=yaml.safe_dump(deploy_project_build_spec),
)
dry_run_project = template.add_resource(
codebuild.Project("DryRunProject", **deploy_project_args)
)
bootstrap_project = template.add_resource(
codebuild.Project(
"BootstrapProject",
Name="servicecatalog-puppet-bootstrap-spokes-in-ou",
ServiceRole=t.GetAtt("DeployRole", "Arn"),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS"),
TimeoutInMinutes=60,
Environment=codebuild.Environment(
ComputeType="BUILD_GENERAL1_SMALL",
Image="aws/codebuild/standard:4.0",
Type="LINUX_CONTAINER",
EnvironmentVariables=[
{"Type": "PLAINTEXT", "Name": "OU_OR_PATH", "Value": "CHANGE_ME"},
{
"Type": "PLAINTEXT",
"Name": "IAM_ROLE_NAME",
"Value": "OrganizationAccountAccessRole",
},
{"Type": "PLAINTEXT", "Name": "IAM_ROLE_ARNS", "Value": ""},
],
),
Source=codebuild.Source(
BuildSpec="version: 0.2\nphases:\n install:\n runtime-versions:\n python: 3.7\n commands:\n - pip install aws-service-catalog-puppet\n build:\n commands:\n - servicecatalog-puppet bootstrap-spokes-in-ou $OU_OR_PATH $IAM_ROLE_NAME $IAM_ROLE_ARNS\nartifacts:\n files:\n - results/*/*\n - output/*/*\n name: BootstrapProject\n",
Type="NO_SOURCE",
),
Description="Bootstrap all the accounts in an OU",
)
)
template.add_resource(
codebuild.Project(
"BootstrapASpokeProject",
Name="servicecatalog-puppet-bootstrap-spoke",
ServiceRole=t.GetAtt("DeployRole", "Arn"),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS"),
TimeoutInMinutes=60,
Environment=codebuild.Environment(
ComputeType="BUILD_GENERAL1_SMALL",
Image="aws/codebuild/standard:4.0",
Type="LINUX_CONTAINER",
EnvironmentVariables=[
{
"Type": "PLAINTEXT",
"Name": "PUPPET_ACCOUNT_ID",
"Value": t.Sub("${AWS::AccountId}"),
},
{
"Type": "PLAINTEXT",
"Name": "ORGANIZATION_ACCOUNT_ACCESS_ROLE_ARN",
"Value": "CHANGE_ME",
},
{
"Type": "PLAINTEXT",
"Name": "ASSUMABLE_ROLE_IN_ROOT_ACCOUNT",
"Value": "CHANGE_ME",
},
],
),
Source=codebuild.Source(
BuildSpec=yaml.safe_dump(
dict(
version=0.2,
phases=dict(
install=install_spec,
build={
"commands": [
"servicecatalog-puppet bootstrap-spoke-as ${PUPPET_ACCOUNT_ID} ${ASSUMABLE_ROLE_IN_ROOT_ACCOUNT} ${ORGANIZATION_ACCOUNT_ACCESS_ROLE_ARN}"
]
},
),
)
),
Type="NO_SOURCE",
),
Description="Bootstrap given account as a spoke",
)
)
cloud_formation_events_queue = template.add_resource(
sqs.Queue(
"CloudFormationEventsQueue",
QueueName="servicecatalog-puppet-cloudformation-events",
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
)
)
cloud_formation_events_queue_policy = template.add_resource(
sqs.QueuePolicy(
"CloudFormationEventsQueuePolicy",
Queues=[t.Ref(cloud_formation_events_queue)],
PolicyDocument={
"Id": "AllowSNS",
"Version": "2012-10-17",
"Statement": [
{
"Sid": "allow-send-message",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": ["sqs:SendMessage"],
"Resource": "*",
"Condition": {
"ArnEquals": {
"aws:SourceArn": t.Sub(
"arn:${AWS::Partition}:sns:*:${AWS::AccountId}:servicecatalog-puppet-cloudformation-regional-events"
)
}
},
}
],
},
)
)
spoke_deploy_bucket = template.add_resource(
s3.Bucket(
"SpokeDeployBucket",
PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
IgnorePublicAcls=True,
BlockPublicPolicy=True,
BlockPublicAcls=True,
RestrictPublicBuckets=True,
),
BucketEncryption=s3.BucketEncryption(
ServerSideEncryptionConfiguration=[
s3.ServerSideEncryptionRule(
ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
SSEAlgorithm="AES256"
)
)
]
),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
BucketName=t.Sub("sc-puppet-spoke-deploy-${AWS::AccountId}"),
VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
)
)
caching_bucket = template.add_resource(
s3.Bucket(
"CachingBucket",
PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
BlockPublicAcls=True,
BlockPublicPolicy=True,
IgnorePublicAcls=True,
RestrictPublicBuckets=True,
),
BucketEncryption=s3.BucketEncryption(
ServerSideEncryptionConfiguration=[
s3.ServerSideEncryptionRule(
ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
SSEAlgorithm="AES256"
)
)
]
),
Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
BucketName=t.Sub(
"sc-puppet-caching-bucket-${AWS::AccountId}-${AWS::Region}"
),
VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
)
)
template.add_output(
t.Output(
"CloudFormationEventsQueueArn",
Value=t.GetAtt(cloud_formation_events_queue, "Arn"),
)
)
template.add_output(t.Output("Version", Value=t.GetAtt(param, "Value")))
template.add_output(
t.Output(
"ManualApprovalsParam", Value=t.GetAtt(manual_approvals_param, "Value")
)
)
template.add_resource(
ssm.Parameter(
"DefaultTerraformVersion",
Type="String",
Name=constants.DEFAULT_TERRAFORM_VERSION_PARAMETER_NAME,
Value=constants.DEFAULT_TERRAFORM_VERSION_VALUE,
)
)
return template
| StarcoderdataPython |
3388552 | <filename>nm-pso.py<gh_stars>0
import numpy as np
from Particle import Particle
from mountain_scooter import MountainScooter
# Seed NumPy's global RNG so simplex/population initialisation and PSO updates
# are reproducible across runs.
np.random.seed(11)
class InitialPointShapeException(Exception):
    """Raised when the user-supplied starting point ``x_1`` does not have ``n`` dimensions."""
    pass
class NM_PSO:
    """
    Class that implement the Nelder-Mead Particle Swarm Optimization algorithm.
    It take inspiration from the paper by <NAME> al. (A New Hybrid Nelder-Mead Particle Swarm Optimization for Coordination Optimization of Directional Overcurrent Relays, 2012).

    The algorithm MAXIMIZES the fitness function: the population is kept sorted
    in descending fitness order, and simplex moves are accepted when they raise
    fitness.
    """
    def __init__(self, n, fitness_function, max_iterations=50, reflection_parameter=1.0, expansion_parameter=2.0, contraction_parameter=0.5, shrinking_parameter=0.5, w=0.4, c1=2.0, c2=2.0, x_1=None, shift_coefficient=1.0, verbose=False):
        """
        Initialize the Nelder-Mead Particle Swarm Optimization algorithm.
        :param n: Number of dimensions. It represent the size of a single particle.
        :param fitness_function: Fitness function to evaluate the particles.
        :param max_iterations: Maximum number of iterations. Default value is 50.
        :param reflection_parameter: Reflection parameter. Default value is 1.0.
        :param expansion_parameter: Expansion parameter. Default value is 2.0.
        :param contraction_parameter: Contraction parameter. Default value is 0.5.
        :param shrinking_parameter: Shrinkage parameter. Default value is 0.5.
        :param w: Inertia weight. Default value is 0.4.
        :param c1: Cognitive parameter. Default value is 2.0.
        :param c2: Social parameter. Default value is 2.0.
        :param x_1: Used as the first point for the simplex generation. Defaults to None, which becomes a random point.
        :param shift_coefficient: Shift coefficient for the simplex initialization. Default value is 1.0.
        :param verbose: If True, print the progress of the algorithm. Default value is False.
        """
        self.n = n
        self.fitness_function = fitness_function
        self.max_iterations = max_iterations
        self.reflection_parameter = reflection_parameter
        self.expansion_parameter = expansion_parameter
        self.contraction_parameter = contraction_parameter
        self.shrinking_parameter = shrinking_parameter
        self.w = w
        self.c1 = c1
        self.c2 = c2
        self.x_1 = x_1
        self.shift_coefficient = shift_coefficient
        self.verbose = verbose
        # Swarm of 2n+1 particles; the first n+1 form the Nelder-Mead simplex.
        self.population = self.initialize_population()
        # Best particle seen so far; set during optimize().
        self.best_particle_population = None

    def initialize_simplex(self):
        """
        Initializes the first simplex to begin iterations
        :return: The first simplex points as list of Particle objects.
        """
        # If the user provided a point, and it is not in the right shape
        # NOTE(review): only an np.ndarray x_1 is honoured; a list/tuple x_1 is
        # silently treated as "not provided" and replaced by a random point.
        if isinstance(self.x_1, np.ndarray):
            if len(self.x_1) != self.n:
                raise InitialPointShapeException(
                    f"Please enter an initial point having {self.n} dimensions.")
            else:  # If the user provided a point, and it is in the right shape
                first_particle = Particle(0, self.x_1)
        else:  # If the user didn't provide a point
            # Initialize the first point of the simplex randomly
            random_value = np.random.random(size=self.n)
            first_particle = Particle(0, random_value)
        simplex_particles = [first_particle]
        # Then, we will generate the other particles by shifting the first one
        # in all the position defined by an eye matrix
        identity = np.eye(self.n, dtype=int)
        for i in range(self.n):
            simplex_particles.append(Particle(i+1, first_particle.value + self.shift_coefficient * identity[i, :]))
        return simplex_particles

    def initialize_population(self):
        """
        Initialize the population of particles
        :return: A list of particles representing the population
        """
        # Number of particles in the swarm as defined in the paper
        N = 2 * self.n + 1
        # the first n+1 particles are constructed using the predetermined starting point and a positive step size of 1.0
        population = self.initialize_simplex()
        # The remaining n particles are randomly generated
        for i in range(self.n+1, N):
            random_value = np.random.random(size=self.n)
            population.append(Particle(i, random_value))
        return population

    def sort(self, reverse=True):
        """
        Sort the population by fitness
        :param reverse: If True, sort in descending order. If False, sort in ascending order. Default value is True.
        """
        self.population.sort(key=lambda x: x.fitness, reverse=reverse)

    def evaluate_population(self):
        """
        Evaluate the fitness of each particle in the population
        """
        for particle in self.population:
            particle.evaluate(self.fitness_function)

    def compute_best_particle_population(self):
        """
        Compute the best particle in the population according to the fitness value.
        :return: Particle object representing the best particle in the population.
        """
        return self.population[np.argmax([particle.fitness for particle in self.population])]

    def pso(self):
        """
        Perform one iteration of the PSO algorithm
        """
        for particle in self.population:
            # Update the particle velocity and according to that update the particle value
            particle.update_velocity(self.w, self.c1, self.c2, self.best_particle_population.value)
            particle.update_value()
            # Repair the particle if it is infeasible and evaluate its fitness
            particle.evaluate(self.fitness_function)

    def nelder_mead(self):
        """
        Performs one iteration of the Nelder-Mead method to the top n+1 particles and update the (n+1)th particle:
        - Computes the centroid
        - Tries reflection, expansion, contraction, shrinking
        - Updates the simplex
        If it continuously tries to shrink the simplex, it re-initializes it with the best point
        (https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method)

        Assumes the population is already sorted in descending fitness order
        (optimize() calls sort() right before this method).
        """
        best_particle = self.population[0]
        second_worst_particle = self.population[self.n-1]
        worst_particle = self.population[self.n]
        # Compute the centroid, excluding the worst point
        centroid_value = np.mean([p.value for p in self.population[:self.n]], axis=0)
        centroid_particle = Particle(-1, centroid_value)
        # NOTE(review): centroid_particle is evaluated but never read afterwards;
        # the extra fitness call looks redundant — confirm before removing, since
        # Particle construction/evaluation may consume global RNG state.
        centroid_particle.evaluate(self.fitness_function)
        # REFLECTION
        reflected_value = centroid_value + self.reflection_parameter * (centroid_value - worst_particle.value)
        reflected_particle = Particle(worst_particle.id, reflected_value)
        reflected_particle.evaluate(self.fitness_function)
        # If the new reflected particle is better than the second worst, but worse than the best, we can break to the next iteration
        if best_particle.fitness >= reflected_particle.fitness > second_worst_particle.fitness:
            # then obtain a new simplex by replacing the worst point with the reflected point
            self.population[self.n] = reflected_particle
            if self.verbose:
                print("\t✨ Simplex reflection applied ✨")
            return
        # EXPANSION
        # If the point we've found is better than the best, we try to expand it
        elif reflected_particle.fitness > best_particle.fitness:
            expanded_value = centroid_value + self.expansion_parameter * (reflected_value - centroid_value)
            expanded_particle = Particle(worst_particle.id, expanded_value)
            expanded_particle.evaluate(self.fitness_function)
            # We substitute the worst point with the better of the two
            # If the expanded particle is better than the reflected particle
            if expanded_particle.fitness > reflected_particle.fitness:
                # then obtain a new simplex by replacing the worst point with the expanded point and break to the next iteration
                self.population[self.n] = expanded_particle
                if self.verbose:
                    print("\t✨ Simplex expansion tried, it worked! ✨")
            else:
                # otherwise, we substitute the worst point with the reflected point and break to the next iteration
                self.population[self.n] = reflected_particle
                if self.verbose:
                    print("\t✨ Simplex expansion tried, but reflection was better ✨")
            return
        # CONTRACTION
        # Here it is certain that the reflected point is worse than the second worst
        # If the reflected point we've found was better than the worst, we'll contract
        elif reflected_particle.fitness > worst_particle.fitness:
            contracted_value = centroid_value + self.contraction_parameter * (reflected_value - centroid_value)
            contracted_particle = Particle(worst_particle.id, contracted_value)
            contracted_particle.evaluate(self.fitness_function)
            # If the contracted point is better than the reflected point
            if contracted_particle.fitness > reflected_particle.fitness:
                # then obtain a new simplex by replacing the worst point with the contracted point and break to the next iteration
                self.population[self.n] = contracted_particle
                if self.verbose:
                    print("\t✨ Simplex contraction applied ✨")
                return
        # we will contract too if the reflected point is worse than the worst one
        elif reflected_particle.fitness <= worst_particle.fitness:
            contracted_value = centroid_value + self.contraction_parameter * (worst_particle.value - centroid_value)
            contracted_particle = Particle(worst_particle.id, contracted_value)
            contracted_particle.evaluate(self.fitness_function)
            # If the contracted point is better than the worst point
            if contracted_particle.fitness > worst_particle.fitness:
                # then obtain a new simplex by replacing the worst point with the contracted point and break to the next iteration
                self.population[self.n] = contracted_particle
                if self.verbose:
                    print("\t✨ Simplex contraction applied ✨")
                return
        # SHRINKING
        # If none of the previous methods worked, we'll try our last resort: shrink contraction
        # We'll want to redefine all the simplex points except for the best one.
        for i in range(1, self.n + 1):
            value = best_particle.value + self.shrinking_parameter * (self.population[i].value - best_particle.value)
            self.population[i] = Particle(self.population[i].id, value)
            self.population[i].evaluate(self.fitness_function)
        if self.verbose:
            print("\t✨ Simplex shrinking applied ✨")

    def optimize(self):
        """
        Search the optimal solution.
        :return: The optimal solution.
        """
        # Evaluate the fitness of each particle in the population
        self.evaluate_population()
        for i in range(self.max_iterations):
            # Sort the population by fitness
            self.sort()
            self.best_particle_population = self.population[0]
            if self.verbose:
                print(
                    f"🚀 Performing iteration {i+1}:\n\t📊 "
                    f"Avg={round(np.average([p.fitness for p in self.population]), 2)}\t"
                    f"Best value={self.best_particle_population.fitness}")
            # Apply Nelder-Mead operator to the top n+1 particles and update the (n+1)th particle.
            self.nelder_mead()
            self.best_particle_population = self.compute_best_particle_population()
            # Apply PSO operator for updating the N particles.
            self.pso()
            self.best_particle_population = self.compute_best_particle_population()
        return self.best_particle_population
def main():
    """Demo entry point: tune a neural-network controller for MountainScooter with NM-PSO."""
    print("🛵 Starting the MOUNTAIN SCOOTER optimization with NM-PSO algorithm...")
    # initialize environment
    scooter = MountainScooter(mass=0.4, friction=0.3, max_speed=1.8)
    # Network topology: 2 inputs, two hidden layers, one output node per action.
    layer_nodes = [2, 8, 7, scooter.num_actions]
    # Every node past the input layer owns one bias...
    bias_count = sum(layer_nodes[1:])
    # ...and each pair of adjacent layers is fully connected.
    weight_count = sum(a * b for a, b in zip(layer_nodes, layer_nodes[1:]))
    # A particle encodes the whole network: all biases plus all weights.
    dimensions = bias_count + weight_count
    # initialize NM-PSO
    optimizer = NM_PSO(
        n=dimensions,
        fitness_function=lambda weights_and_biases: scooter.environment_execution(weights_and_biases, layer_nodes),
        max_iterations=20,
        verbose=True,
    )
    best = optimizer.optimize()
    # Replay the best controller so the environment holds its final trajectory for rendering.
    scooter.environment_execution(best.value, layer_nodes)
    print(f"\n🏆 Optimal particle: {best}")
    scooter.render(show_plot=True)
    print("✅ Complete!")
print("✅ Complete!")
# Standard script-entry guard: run the demo only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
74623 | <gh_stars>1-10
"""
Use the Eratoshenes Algorithm to generate first 1229 prime numbers.
"""
max = 10000
smax = 100 # sqrt(10000)
lst = [] # number list, all True (is prime) at first
for i in range(max + 1): # initialization
lst.append(True)
for i in range(2, smax + 1): # Eratoshenes Algorithm
sieve = 2 * i
while sieve <= max:
lst[sieve] = False
sieve += i
for i in range(2, max + 1): # output in a line
if lst[i] == True:
print(i, end=',')
| StarcoderdataPython |
147426 | <gh_stars>1-10
# pyOCD debugger
# Copyright (c) 2018-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...flash.flash import Flash
from ...core.coresight_target import CoreSightTarget
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
# Flash programming algorithm blob for the external QSPI flash: position-
# independent code loaded into target RAM at 'load_address', plus the entry
# points and layout metadata needed to call it (pyOCD packed-algo format).
FLASH_ALGO_QSPI = {
    'load_address' : 0x20000000,

    # Flash algorithm as a hex string (thumb machine code words).
    'instructions': [
    0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
    0x4604b570, 0x4616460d, 0x44484867, 0xb9e86800, 0x44784866, 0x44494966, 0x46086008, 0x44494965,
    0x46086008, 0x44494964, 0x68096008, 0xf0006808, 0x4961fc96, 0x68084449, 0xf0002101, 0xb108f9bb,
    0xbd702001, 0x49582001, 0x60084449, 0xe7f82000, 0x4604b510, 0x4854b984, 0x68004448, 0xd10b2801,
    0x44494955, 0xf0006808, 0xb108fa1f, 0xbd102001, 0x494d2000, 0x60084449, 0xe7f82000, 0x494eb510,
    0x22004449, 0x46116808, 0xfba4f000, 0x2001b108, 0x2000bd10, 0xb510e7fc, 0xf0244604, 0x4604407f,
    0x44494945, 0x3280f44f, 0x46216808, 0xfb92f000, 0x2001b108, 0x2000bd10, 0xe92de7fc, 0x460441f0,
    0x4617460d, 0x407ff024, 0x493b4604, 0x462b4449, 0x6808463a, 0xf0004621, 0x4606faf2, 0x4630b116,
    0x81f0e8bd, 0xe7fb2000, 0x43f8e92d, 0x46884607, 0xf0274615, 0xbf00467f, 0xe0232400, 0x4449492e,
    0x466a2304, 0x46316808, 0xfa8bf000, 0x0000f89d, 0x42885d29, 0xf89dd111, 0x1c601001, 0x42815c28,
    0xf89dd10b, 0x1ca01002, 0x42815c28, 0xf89dd105, 0x1ce01003, 0x42815c28, 0x1930d002, 0x83f8e8bd,
    0x1d241d36, 0xd3d94544, 0xe7f72000, 0x43f8e92d, 0x460f4606, 0xf0264614, 0xbf00457f, 0x0800f04f,
    0x4915e01d, 0x23044449, 0x6808466a, 0xf0004629, 0xf89dfa58, 0x42a00000, 0xf89dd10b, 0x42a00001,
    0xf89dd107, 0x42a00002, 0xf89dd103, 0x42a00003, 0x2001d002, 0x83f8e8bd, 0xf1081d2d, 0x45b80804,
    0x2000d3df, 0x0000e7f6, 0x00000004, 0x00000d9a, 0x00000008, 0x0000000c, 0x00000020, 0xb118b570,
    0x2d2018d5, 0xb902d800, 0x2a20bd70, 0x6001d101, 0x2501e7fa, 0x1e6c4095, 0xfa046805, 0x43b5f603,
    0x0604ea01, 0x4335409e, 0xbf006005, 0xb510e7ec, 0x21064604, 0xf0006820, 0xbd10fd7b, 0x4604b57f,
    0x2000460d, 0x90009003, 0x90029001, 0xaa032301, 0x68202165, 0xfd7ef000, 0xb1164606, 0xb0044630,
    0xb125bd70, 0xd00d2d01, 0xd1212d02, 0xf89de015, 0xf040000c, 0x90030040, 0x000cf89d, 0x0080f040,
    0xe0179003, 0x000cf89d, 0x0040f020, 0xf89d9003, 0xf040000c, 0x90030080, 0xf89de00c, 0xf040000c,
    0x90030040, 0x000cf89d, 0x0080f020, 0xe0019003, 0xe7d42001, 0x4620bf00, 0xffb9f7ff, 0x90002000,
    0x90029001, 0xaa032301, 0x68202161, 0xfd5ff000, 0xb10e4606, 0xe7c24630, 0x462a462b, 0x68204629,
    0xfb83f000, 0xb10e4606, 0xe7b84630, 0xe7b62000, 0x4604b57f, 0x2000460e, 0x90009003, 0x90029001,
    0xaa032301, 0x68202185, 0xfd24f000, 0xb1154605, 0xb0044628, 0x2304bd70, 0x4631461a, 0xf7ffa803,
    0x4620ff6d, 0xff83f7ff, 0x90002000, 0x90029001, 0xaa032301, 0x68202181, 0xfd29f000, 0xb10d4605,
    0xe7e64628, 0xe7e42000, 0x4605b57f, 0x90032000, 0x2000e00e, 0x90019000, 0x23019002, 0x2170aa03,
    0xf0006828, 0x4604fcf7, 0x4620b114, 0xbd70b004, 0x000cf89d, 0x280009c0, 0x2000d0eb, 0xe92de7f6,
    0xb0844dff, 0x46924606, 0x9c10469b, 0xf5c0b2e0, 0xe9dd7580, 0xe9cd1011, 0x90024100, 0x4652462b,
    0x99056830, 0xfcf3f000, 0xb11f4607, 0xb0084638, 0x8df0e8bd, 0x442c44aa, 0xf7ff4630, 0x4680ffc5,
    0x0f00f1b8, 0x4640d001, 0x4630e7f1, 0xff2ff7ff, 0x1011e9dd, 0x0305ebab, 0x4100e9cd, 0x46529002,
    0x99056830, 0xfcd3f000, 0xb10f4607, 0xe7de4638, 0xe7dc2000, 0x4df3e92d, 0x4604b082, 0xb1209803,
    0xd00b2801, 0xd11b2802, 0x2700e011, 0x0a03f04f, 0xf04f2600, 0x20000b02, 0xe0159001, 0xf04f2700,
    0x26080a0b, 0x0b02f04f, 0x90012000, 0x2702e00c, 0x0a6bf04f, 0xf04f2608, 0x20000b32, 0xe0039001,
    0xb0042001, 0x8df0e8bd, 0x4639bf00, 0xf7ff4620, 0x4680fef5, 0x0f00f1b8, 0x4640d001, 0x4631e7f1,
    0xf7ff4620, 0x4680ff45, 0x0f00f1b8, 0x4640d001, 0xbf00e7e7, 0xf0006820, 0x2800fa40, 0x6820d0fa,
    0xfa85f000, 0x682068a1, 0xfa91f000, 0xb10d4605, 0xe7d64628, 0x46514632, 0xf0006820, 0x4605faff,
    0x4628b10d, 0x6820e7cd, 0x9a014659, 0xfb01f000, 0xb10d4605, 0xe7c44628, 0x7180f44f, 0xf0006820,
    0x4605fb03, 0x4628b10d, 0x2103e7bb, 0xf0006820, 0x4605fb14, 0x4628b10d, 0x6820e7b3, 0xfa5ff000,
    0x74209803, 0xe7ac2000, 0x4604b570, 0x46202100, 0xfea4f7ff, 0xb10d4605, 0xbd704628, 0x46202100,
    0xfef6f7ff, 0xb10d4605, 0xe7f64628, 0x6820bf00, 0xf9f3f000, 0xd0fa2800, 0xf0006820, 0x6820fa38,
    0xfb30f000, 0xf0006820, 0x2000fa3a, 0xbf007420, 0xe92de7e3, 0x46044df0, 0x46174688, 0x6820461d,
    0x68406800, 0xfbb51c46, 0xb107fbf6, 0x2001b915, 0x8df0e8bd, 0x0005eb08, 0x428868e1, 0x2006d301,
    0xf04fe7f6, 0xe00d0a00, 0x68204641, 0xfadef000, 0x46384632, 0xf0006861, 0x1badfc29, 0x44b04437,
    0x0a01f10a, 0xd3ef45da, 0x4641b145, 0xf0006820, 0x462afacd, 0x68614638, 0xfc18f000, 0xf0006820,
    0x2000fadc, 0xe92de7d4, 0x46044dfc, 0x4692460f, 0x6820461e, 0x68406800, 0x0801f100, 0x0f00f1ba,
    0xb916d000, 0xe8bd2001, 0x19b88dfc, 0x428868e1, 0x2006d301, 0xf007e7f7, 0x1b7f0503, 0xfbb01970,
    0x9001f0f8, 0x90002000, 0x4639e01e, 0xf0006820, 0xeba8fa9d, 0x68610205, 0x46511948, 0xfbe6f000,
    0x0005eba8, 0xeba81a36, 0x44820005, 0x25004447, 0xf7ff4620, 0x4683fe99, 0x0f00f1bb, 0x4658d001,
    0x9800e7d1, 0x90001c40, 0x0100e9dd, 0xd3dc4288, 0x4639b196, 0xf0006820, 0x6861fa79, 0x46321948,
    0xf0004651, 0x4620fbc3, 0xfe7ef7ff, 0xf1bb4683, 0xd0010f00, 0xe7b64658, 0xf0006820, 0x2000fa7e,
    0xe92de7b1, 0xb0844dff, 0x460e4605, 0x08f8461f, 0x7c289003, 0x2801b160, 0x2802d005, 0xf04fd107,
    0x24080a6b, 0xf04fe008, 0x24080a0b, 0xbf00e004, 0x0a03f04f, 0xbf002400, 0xf04fbf00, 0xe0180b00,
    0xe9cd2003, 0x94026000, 0x68282308, 0x9a064651, 0xfb50f000, 0xf1b84680, 0xd0030f00, 0xb0084640,
    0x8df0e8bd, 0x30089806, 0x36089006, 0xf10b3f08, 0x98030b01, 0xd3e34583, 0x2003b17f, 0x6000e9cd,
    0x463b9402, 0x46516828, 0xf0009a06, 0x4680fb33, 0x0f00f1b8, 0x4640d001, 0x2000e7e1, 0xe92de7df,
    0xb0864dff, 0x460c4680, 0x08f0461e, 0xf8989005, 0xb1480010, 0xd0062801, 0xd1032802, 0x0b32f04f,
    0xe0052500, 0xbf00bf00, 0x0b02f04f, 0xbf002500, 0x2000bf00, 0xe0379004, 0xf7ff4640, 0x0a21fd78,
    0xebb11de0, 0xd00f2f10, 0x23082003, 0x0501e9cd, 0x46599400, 0x9a084640, 0xfe11f7ff, 0xb1a74607,
    0xb00a4638, 0x8df0e8bd, 0xe9cd2003, 0x94000501, 0xf8d82308, 0x46590000, 0xf0009a08, 0x4682fb08,
    0x0f00f1ba, 0x4650d001, 0x4640e7eb, 0xfddcf7ff, 0xb10f4607, 0xe7e44638, 0x30089808, 0x34089008,
    0x98043e08, 0x90041c40, 0x0104e9dd, 0xd3c34288, 0x4640b376, 0xfd3bf7ff, 0x19a00a21, 0xebb11e40,
    0xd00d2f10, 0x46332003, 0x0501e9cd, 0x46599400, 0x9a084640, 0xfdd3f7ff, 0xb1974607, 0xe7c04638,
    0xe9cd2003, 0x94000501, 0xf8d84633, 0x46590000, 0xf0009a08, 0x4682facc, 0x0f00f1ba, 0x4650d001,
    0x4640e7af, 0xfda0f7ff, 0xb10f4607, 0xe7a84638, 0xe7a62000, 0x4df0e92d, 0x4607b086, 0x4693460c,
    0xf7ff4638, 0xf1bbfd04, 0xd0090f00, 0x5f80f5bb, 0xf5bbd01d, 0xd0124f00, 0x3f80f5bb, 0xe007d11f,
    0x2005b11c, 0xe8bdb006, 0x25c78df0, 0xe0182600, 0x260325d8, 0xb108b2a0, 0xe7f32005, 0x2552e011,
    0xf3c42603, 0xb108000e, 0xe7eb2005, 0x2520e009, 0xf3c42603, 0xb108000b, 0xe7e32005, 0x2001e001,
    0xbf00e7e0, 0x42a068f8, 0x2006d801, 0x2000e7da, 0xc151a901, 0x90009004, 0x461a2300, 0x68384629,
    0xf986f000, 0xf1b84680, 0xd0010f00, 0xe7c94640, 0xf7ff4638, 0x4682fd49, 0x0f00f1ba, 0x4640d001,
    0x2000e7c0, 0x0000e7be, 0xb118b570, 0x2d2018d5, 0xb902d800, 0x2a20bd70, 0x6001d101, 0x2501e7fa,
    0x1e6c4095, 0xfa046805, 0x43b5f603, 0x0604ea01, 0x4335409e, 0xbf006005, 0x4601e7ec, 0x68026808,
    0x0fc06810, 0xe92d4770, 0x460545f8, 0x4614468a, 0x6828461e, 0xb10e6807, 0xe0011d38, 0x0008f107,
    0xf8d84680, 0x90000000, 0x4628bf00, 0xffe5f7ff, 0xd0fa2800, 0xd9022c1f, 0xe8bd2001, 0x230085f8,
    0x46512208, 0xf7ff4668, 0x2318ffbf, 0x46212205, 0xf7ff4668, 0x9800ffb9, 0x0000f8c8, 0xe7ec2000,
    0xb1214601, 0xd0042901, 0xd1062902, 0x2000e003, 0x20014770, 0x2002e7fc, 0xf04fe7fa, 0xe7f730ff,
    0x68084601, 0x68106802, 0x0001f000, 0x46014770, 0x6810680a, 0xf0226802, 0x60020201, 0x46014770,
    0x6810680a, 0xf0426802, 0x60020201, 0xb5704770, 0x460c4605, 0x68066828, 0x4628bf00, 0xff9df7ff,
    0xd0fa2800, 0x0001f004, 0x2c02b918, 0x2c20d301, 0x2001d901, 0x2001bd70, 0x0154ebc0, 0x22042313,
    0xf7ff4630, 0x2000ff71, 0xe92de7f4, 0xb0824dff, 0x460f4682, 0xf8da4693, 0x68040000, 0x90016860,
    0x900068a0, 0x4650bf00, 0xff77f7ff, 0xd0fa2800, 0xf7ff4638, 0x4605ffa5, 0xb9181c68, 0xb0062001,
    0x8df0e8bd, 0x2308b13f, 0x46292202, 0xf7ff1d20, 0x2000ff4b, 0x4658e7f3, 0xff92f7ff, 0x98054680,
    0xff8ef7ff, 0xf1084606, 0xb1080001, 0xb9081c70, 0xe7e42001, 0x22022308, 0xa8014629, 0xff34f7ff,
    0x2202230c, 0xa8014641, 0xff2ef7ff, 0x22022310, 0xa8014631, 0xff28f7ff, 0x2202230c, 0x46684641,
    0xff22f7ff, 0x22022310, 0x46684631, 0xff1cf7ff, 0x60609801, 0x60a09800, 0xe7c02000, 0x4604b570,
    0x4616460d, 0x46322301, 0x46204629, 0xff2bf7ff, 0xb570bd70, 0x460d4604, 0x23004616, 0x46294632,
    0xf7ff4620, 0xbd70ff20, 0x4604b570, 0x6820460d, 0xbf006806, 0xf7ff4620, 0x2800ff10, 0xf5b5d0fa,
    0xd3015f80, 0xbd702001, 0x220c2304, 0xf1064629, 0xf7ff0014, 0x2000fee9, 0xb570e7f5, 0x460c4605,
    0x68066828, 0x4628bf00, 0xfef7f7ff, 0xd0fa2800, 0x2c10b10c, 0x2001d901, 0x1e61bd70, 0x22042300,
    0x0014f106, 0xfed0f7ff, 0xe7f52000, 0x4604b570, 0x6820460d, 0x46206803, 0xff22f7ff, 0xb1164606,
    0xf7ff4620, 0x625dff24, 0xf4406818, 0x60183080, 0x4620b116, 0xff23f7ff, 0xb530bd70, 0x68184603,
    0x46186804, 0xff0cf7ff, 0xb1154605, 0xf7ff4618, 0x6820ff0e, 0x3080f420, 0xb1156020, 0xf7ff4618,
    0xbd30ff0e, 0x680a4601, 0x4a8e6810, 0x22036002, 0x22026042, 0x4a8c6082, 0x22006142, 0xf8c06242,
    0xf8c02090, 0xf8c02094, 0xf8c020a8, 0x477020ac, 0x4dffe92d, 0x4616b086, 0xf8dd461d, 0xe9dda054,
    0x98068712, 0x68046800, 0x90052000, 0xb10db116, 0xe0002001, 0x46832000, 0x0f00f1b8, 0xb10fd002,
    0xe0002001, 0x90042000, 0x0f00f1ba, 0x2001d001, 0x2000e000, 0x20009003, 0x90029001, 0xd9032d08,
    0xb00a2001, 0x8df0e8bd, 0xd9012f08, 0xe7f82001, 0x0f04f1ba, 0x2001d901, 0x9816e7f3, 0xd901281f,
    0xe7ee2001, 0x0f00f1bb, 0x9804d003, 0x2001b108, 0x2318e7e7, 0xa8052208, 0xf7ff9907, 0xf1bbfe45,
    0xd0090f00, 0xf4409805, 0x90050000, 0x23141e69, 0xa8052203, 0xfe38f7ff, 0xb3289804, 0xf4409805,
    0x90054000, 0x230c1e79, 0xa8052203, 0xfe2cf7ff, 0x0003f008, 0xf007b968, 0xb9500003, 0x0000f8d8,
    0x00a8f8c4, 0xd10f2f08, 0x0004f8d8, 0x00acf8c4, 0x463ae00a, 0xa8014641, 0xf898f000, 0xf8c49801,
    0x980200a8, 0x00acf8c4, 0xb1689803, 0xf4409805, 0x90052000, 0xf8c49814, 0xf1aa0094, 0x23100101,
    0xa8052202, 0xfe00f7ff, 0x22052307, 0x9916a805, 0xfdfaf7ff, 0xf8449805, 0x68200f90, 0x0001f040,
    0x0990f844, 0xf8d4bf00, 0xf3c00090, 0x28000040, 0xf1bbd1f9, 0xd0190f00, 0x0003f006, 0xf005b958,
    0xb9400003, 0x00a0f8d4, 0x2d086030, 0xf8d4d10e, 0x607000a4, 0xf8d4e00a, 0x900100a0, 0x00a4f8d4,
    0x462a9002, 0x4630a901, 0xf850f000, 0xe7682000, 0xb085b530, 0x460d4604, 0x90012000, 0x90039002,
    0x46039004, 0x46294602, 0x46209000, 0xff30f7ff, 0xbd30b005, 0x4df0e92d, 0x4606b086, 0x4614460f,
    0xe9dd461d, 0xf8ddab0f, 0xb1048038, 0x2001b91d, 0xe8bdb006, 0x20008df0, 0xe8a1a901, 0x462b0d01,
    0x46394622, 0x46309000, 0xff12f7ff, 0xe92de7f0, 0xb0864df0, 0x460f4606, 0x461d4614, 0xab0fe9dd,
    0x8038f8dd, 0xb91db104, 0xb0062001, 0x8df0e8bd, 0x461a2300, 0x46304639, 0x0d30e88d, 0xfef8f7ff,
    0x0000e7f3, 0x80780081, 0x00101002, 0x0301ea40, 0xd003079b, 0xc908e009, 0xc0081f12, 0xd2fa2a04,
    0xf811e003, 0xf8003b01, 0x1e523b01, 0x4770d2f9, 0x52800000, 0x0003ffff, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00800000, 0x00000000, 0x00000000
    ],

    # Function addresses
    'pc_init': 0x20000021,
    'pc_unInit': 0x20000071,
    'pc_program_page': 0x200000db,
    'pc_erase_sector': 0x200000b7,
    'pc_eraseAll': 0x2000009d,

    'static_base' : 0x20000000 + 0x00000020 + 0x00000db8,
    'begin_stack' : 0x20001000,
    'begin_data' : 0x20000000 + 0x1000,
    'page_size' : 0x100,
    'analyzer_supported' : False,
    'analyzer_address' : 0x00000000,
    'page_buffers' : [0x20001000, 0x20001100],   # Enable double buffering
    'min_program_length' : 0x100,

    # Flash information
    'flash_start': 0x0,
    'flash_size': 0x800000,
    'sector_sizes': (
        (0x0, 0x10000),
    )
}
# Flash programming algorithm blob for the on-chip eFlash; same packed
# format as FLASH_ALGO_QSPI above (code loaded to RAM plus entry points).
FLASH_ALGO_EFLASH = {
    'load_address' : 0x20000000,

    # Flash algorithm as a hex string
    'instructions': [
    0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
    0x4604b570, 0x4616460d, 0x44484866, 0xb9986800, 0x44484865, 0x44494965, 0x48656048, 0x60084448,
    0x49644608, 0x60084449, 0x49636808, 0xf8dbf000, 0x495c2001, 0x60084449, 0xbd702000, 0xb9414601,
    0x44484858, 0x28016800, 0x2000d103, 0x444a4a55, 0x20006010, 0xb5104770, 0x44494956, 0x68082202,
    0xf0002100, 0xb108fa2b, 0xbd102001, 0xe7fc2000, 0x4604b510, 0x6020f1a4, 0x4449494b, 0x1e496849,
    0x0401ea00, 0x4449494b, 0x68082200, 0xf0004621, 0xb108fa15, 0xbd102001, 0xe7fc2000, 0x43f8e92d,
    0x460d4604, 0xf1a44616, 0x493f6020, 0x68494449, 0xea001e49, 0x20040401, 0xea4f9000, 0x27000895,
    0x493ce00e, 0x466b4449, 0x68084632, 0xf0004621, 0xb110f94b, 0xe8bd2001, 0x1d3683f8, 0x1c7f1d24,
    0xd3ee4547, 0xe7f62000, 0x41fce92d, 0x460f4604, 0xf1a44690, 0x492c6020, 0x68494449, 0xea001e49,
    0x20040401, 0x25009001, 0x492ae01a, 0xab014449, 0x6808466a, 0xf0004621, 0x2600f8c1, 0xf81de00c,
    0x19a90006, 0x1001f818, 0xd0044288, 0x6020f104, 0xe8bd4428, 0x1c7681fc, 0xd3f02e04, 0x1d2d1d24,
    0xd3e242bd, 0xe7f42000, 0x41fce92d, 0x460e4604, 0xf1a44617, 0x49146020, 0x68494449, 0xea001e49,
    0x20040401, 0xf04f9001, 0xe0160800, 0x44494911, 0x466aab01, 0x46216808, 0xf890f000, 0xe0072500,
    0x0005f81d, 0xd00242b8, 0xe8bd2001, 0x1c6d81fc, 0xd3f52d04, 0xf1081d24, 0x45b00804, 0x2000d3e6,
    0x0000e7f3, 0x00000004, 0x0000000c, 0x00000014, 0x00000008, 0x0000001c, 0x02710000, 0x6804b510,
    0xb11a6823, 0x041ff001, 0xe002601c, 0x041ff001, 0xbd10605c, 0x4604b510, 0x68096821, 0x5080f501,
    0xf9eaf000, 0xb570bd10, 0x460d4604, 0x211f2200, 0xf7ff4620, 0x6821ffe3, 0xf5016809, 0x46295080,
    0xf9cef000, 0xf7ff4620, 0x6861ffe5, 0x20016048, 0x70086861, 0xb570bd70, 0x25004604, 0x69a0bf00,
    0x001cf000, 0xd0fa1e05, 0x0010f005, 0x4620b140, 0xf9b3f000, 0x69a0bf00, 0x000cf000, 0xd0fa1e05,
    0xbd704628, 0x4604b570, 0xbf00460d, 0xf00069a0, 0x28000001, 0x61e5d1fa, 0x61602001, 0xf7ff4620,
    0xbd70ffda, 0x4604b5f0, 0x462e460d, 0x0020f104, 0x2a1018c7, 0x2010d901, 0x4610e000, 0x18d04602,
    0xd9002810, 0x21001ad2, 0x7838e004, 0x1c7f7030, 0x1c491c76, 0xd3f84291, 0xbdf04610, 0x4dfce92d,
    0x460f4604, 0x461d4692, 0x68006820, 0x682e9001, 0x0b00f04f, 0x200046d8, 0x68609000, 0xb9107800,
    0xe8bd2001, 0x68288dfc, 0x68614438, 0x42886849, 0x2005d901, 0x9801e7f5, 0xb1086980, 0xe7f02002,
    0x68096821, 0x5080f501, 0xf963f000, 0xd0012880, 0xe7e62003, 0x4639e015, 0xf7ff9801, 0x4683ffa3,
    0x000ff007, 0x46329000, 0xe9dd4651, 0xf7ff3000, 0x4680ffa9, 0x0608eba6, 0x44c24447, 0xf0009801,
    0xb11ef93c, 0x0008f00b, 0xd0e42800, 0x0008f00b, 0x4446b100, 0x1b806828, 0xf00b6028, 0xb1080008,
    0xe7be2006, 0xe7bc2000, 0x41f0e92d, 0x460d4604, 0x61e54616, 0x20026226, 0x46206160, 0xff5bf7ff,
    0x46384607, 0x81f0e8bd, 0x4df0e92d, 0x460e4605, 0x461c4690, 0xf8d06828, 0xf04fb000, 0x68270a00,
    0x78006868, 0x2001b910, 0x8df0e8bd, 0x44306820, 0x68496869, 0xd9014288, 0xe7f52005, 0x0018f8db,
    0x2002b108, 0xf006e7f0, 0xb9180003, 0xf0007820, 0xb1080003, 0xe7e72007, 0x4631e00d, 0xf8d84658,
    0xf7ff2000, 0x4682ffc1, 0xf1081d36, 0x1f3f0804, 0xf0004658, 0xb11ff8e2, 0x0008f00a, 0xd0ec2800,
    0x0008f00a, 0x1d3fb100, 0x1bc06820, 0xf00a6020, 0xb1080008, 0xe7c72006, 0xe7c52000, 0x4ff8e92d,
    0x460e4607, 0x461c4692, 0x68056838, 0x90002000, 0x8000f8d4, 0x0b02f04f, 0x78006878, 0x2001b910,
    0x8ff8e8bd, 0x44306820, 0x68496879, 0xd9014288, 0xe7f52005, 0xb10869a8, 0xe7f12002, 0x0003f006,
    0x7820b918, 0x0003f000, 0x2007b108, 0xe023e7e8, 0xf8da61ee, 0x62280000, 0x61682003, 0x9800e00c,
    0x0008f000, 0x6820b140, 0x0008eba0, 0x46286020, 0xf893f000, 0xe7d32006, 0x900069a8, 0x000bea00,
    0xd1ec4558, 0x0b06f04f, 0xf10a1d36, 0xf1a80a04, 0x46280804, 0xf881f000, 0x0f00f1b8, 0xbf00d1d8,
    0xf00069a8, 0x28000004, 0x6820d0fa, 0x0008eba0, 0x46286020, 0xf871f000, 0xe7b12000, 0x41f0e92d,
    0x460e4605, 0x68284617, 0xf04f6804, 0x68680800, 0xb9107800, 0xe8bd2001, 0x69a081f0, 0x2002b108,
    0xb127e7f9, 0xd0132f01, 0xd12a2f02, 0x6868e01c, 0x42b06840, 0x2005d201, 0x61e6e7ed, 0x61602004,
    0xf7ff4620, 0x4680fe88, 0xf0004620, 0xe01af846, 0x61e02000, 0x61602007, 0xf7ff4620, 0x4680fe7c,
    0xf0004620, 0xe00ef83a, 0x1000f44f, 0x200761e0, 0x46206160, 0xfe6ff7ff, 0x46204680, 0xf82df000,
    0x2004e001, 0xbf00e7c7, 0x0f08f1b8, 0x2006d101, 0x2000e7c1, 0x4601e7bf, 0x68026808, 0xf3c06990,
    0x47701040, 0x4604b510, 0x68096821, 0x5080f501, 0xf826f000, 0xb510bd10, 0x68214604, 0xf5016809,
    0xf0005080, 0xbd10f821, 0x4604b510, 0x68096821, 0x5080f501, 0xf81bf000, 0x211fbd10, 0x477060c1,
    0x4601460a, 0x600b4b0c, 0x604b4b0c, 0x608b4b0c, 0x46014770, 0x47702080, 0xf44f4601, 0x47701000,
    0xf44f4601, 0x47704080, 0x20034601, 0x46014770, 0x6990460a, 0x00004770, 0x11082801, 0x64050208,
    0x0a0a0a08, 0x00000000, 0x00000000, 0x52400000, 0x00000000, 0x00400000, 0x00000000, 0x00000000,
    0x00000000
    ],

    # Relative function addresses
    'pc_init': 0x20000021,
    'pc_unInit': 0x2000005d,
    'pc_program_page': 0x200000bd,
    'pc_erase_sector': 0x20000091,
    'pc_eraseAll': 0x20000077,

    'static_base' : 0x20000000 + 0x00000020 + 0x000005e4,
    'begin_stack' : 0x20000900,
    'begin_data' : 0x20000000 + 0x1000,
    'page_size' : 0x4000,
    'analyzer_supported' : False,
    'analyzer_address' : 0x00000000,
    'page_buffers' : [0x20001000, 0x20005000],   # Enable double buffering
    'min_program_length' : 0x4000,

    # Flash information
    'flash_start': 0xa000000,
    'flash_size': 0x400000,
    'sector_sizes': (
        (0x0, 0x4000),
    )
}
class MuscaB1(CoreSightTarget):
    """Arm Musca-B1 target definition.

    Every flash and RAM region appears twice in the memory map: once at a
    non-secure address ('n*' names) and once at a secure alias ('s*' names,
    access strings ending in 's'), with each secure entry aliased to its
    non-secure counterpart.
    """

    VENDOR = "Arm"

    memoryMap = MemoryMap(
        # On-chip eFlash (non-secure + secure alias).
        FlashRegion(name='neflash', start=0x0A000000, length=0x00200000, access='rx',
                    blocksize=0x4000,
                    page_size=0x4000,
                    is_boot_memory=False,
                    is_default=False,
                    algo=FLASH_ALGO_EFLASH),
        FlashRegion(name='seflash', start=0x1A000000, length=0x00200000, access='rxs',
                    blocksize=0x4000,
                    page_size=0x4000,
                    is_boot_memory=False,
                    is_default=False,
                    algo=FLASH_ALGO_EFLASH,
                    alias='neflash'),
        # External QSPI flash is the boot memory (non-secure + secure alias).
        FlashRegion(name='nqspi', start=0x00000000, length=0x00800000, access='rx',
                    blocksize=0x10000,
                    page_size=0x10000,
                    is_boot_memory=True,
                    is_external=True,
                    algo=FLASH_ALGO_QSPI),
        FlashRegion(name='sqspi', start=0x10000000, length=0x00800000, access='rxs',
                    blocksize=0x10000,
                    page_size=0x10000,
                    is_boot_memory=True,
                    is_external=True,
                    algo=FLASH_ALGO_QSPI,
                    alias='nqspi'),
        RamRegion(  name='ncoderam', start=0x0A400000, length=0x00080000, access='rwx'),
        RamRegion(  name='scoderam', start=0x1A400000, length=0x00080000, access='rwxs',
                    alias='ncoderam'),
        # Due to an errata, the first 8 kB of sysram is not accessible to the debugger.
        RamRegion(  name='nsysram', start=0x20002000, length=0x0007e000, access='rwx'),
        RamRegion(  name='ssysram', start=0x30002000, length=0x0007e000, access='rwxs',
                    alias='nsysram'),
        )

    def __init__(self, link):
        """Create the target and attach its builtin SVD file for peripheral info."""
        super(MuscaB1, self).__init__(link, self.memoryMap)
        self._svd_location = SVDFile.from_builtin("Musca_B1.svd")
| StarcoderdataPython |
12767 | <gh_stars>10-100
from boa_test.tests.boa_test import BoaTest
from boa.compiler import Compiler
from neo.Settings import settings
from neo.Prompt.Commands.BuildNRun import TestBuild
class TestContract(BoaTest):
    """Compile-and-execute checks for neo-boa dict support examples."""

    def test_dict1(self):
        # Compile the example contract, run it, and check the returned map.
        module = Compiler.instance().load('%s/boa_test/example/DictTest1.py' % TestContract.dirname).default
        script = module.write()

        tx, results, total_ops, engine = TestBuild(script, [], self.GetWallet1(), '', '02')
        self.assertEqual(len(results), 1)
        self.assertIsInstance(results[0].GetMap(), dict)
        self.assertEqual(results[0].GetBoolean(), True)

    def test_dict2(self):
        # This example returns a plain integer computed from a dict.
        module = Compiler.instance().load('%s/boa_test/example/DictTest2.py' % TestContract.dirname).default
        script = module.write()

        tx, results, total_ops, engine = TestBuild(script, [], self.GetWallet1(), '', '02')
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].GetBigInteger(), 7)

    def test_dict3(self):
        module = Compiler.instance().load('%s/boa_test/example/DictTest3.py' % TestContract.dirname).default
        script = module.write()

        # Also exercise the human-readable listing path before execution.
        listing = module.to_s()
        self.assertGreater(len(listing), 0)

        tx, results, total_ops, engine = TestBuild(script, [], self.GetWallet1(), '', '02')
        self.assertEqual(len(results), 1)
        self.assertIsInstance(results[0].GetMap(), dict)
| StarcoderdataPython |
3358483 | """
Check that a OutputStream can be assigned to sys.stdout.
"""
import support

from java import io
import sys

# Jython interop: install a java.io.OutputStream as Python's stdout, so
# print output lands in the file the stream wraps.
o = io.FileOutputStream("test003.out")
sys.stdout = o

print "hello"

# Read the file back; read(-1) reads to end-of-file.
f = open("test003.out", "r")
s = f.read(-1)
f.close()

if s != "hello\n":
    raise support.TestError('Wrong redirected stdout ' + `s`)
| StarcoderdataPython |
1720286 | <filename>ml/ex04/template/least_squares.py
# -*- coding: utf-8 -*-
"""Exercise 3.
Least Square
"""
import numpy as np
def least_squares(y, tx):
    """Solve ordinary least squares via the normal equations.

    Parameters
    ----------
    y : ndarray of shape (N,)
        Target values.
    tx : ndarray of shape (N, D)
        Design matrix, one sample per row.

    Returns
    -------
    tuple
        ``(loss, w)`` where ``w`` solves ``(tx.T @ tx) w = tx.T @ y`` and
        ``loss`` is half the mean squared error of the resulting fit.
    """
    # Solving the linear system directly is faster and numerically better
    # conditioned than explicitly inverting the Gram matrix tx.T @ tx.
    gram = tx.T @ tx
    rhs = tx.T @ y
    w = np.linalg.solve(gram, rhs)
    loss = 1 / 2 * np.mean((y - tx.dot(w)) ** 2)
    return loss, w
| StarcoderdataPython |
79034 |
"""
File: booleans.py
Copyright (c) 2016 <NAME>
License: MIT
This code was used to simply gain a better understanding of what different boolean expressions will do.
"""
C = 41 #There will be no output. This expression is setting the variable 'C' equal to 41.

C == 40 #The output will be 'False'. C (which is 41) is being compared to 40.
C != 40 and C < 41 #The output will be 'False'. 'and' requires both conditions to be true, and C < 41 is false.
C != 40 or C < 41 #The output will be 'True', since the first condition is true, despite the second condition being false.
not C == 40 #The output will be 'True'. Because of the 'not' at the beginning, the output is reversed. The condition is False, but because of the 'not' the output will be 'True'.
not C > 40 #The output will be 'False'. The condition is true, but because of the 'not' at the beginning of the expression, the output will be reversed.
C <= 41 #The output will be 'True'.
not False #The output will be 'True'.
True and False #The output will be 'False'.
False or True #The output will be 'True'.
False or False or False #The output will be 'False'.
True and True and False #The output will be 'False'.
False == 0 #The output will be 'True'. (bool is a subclass of int, so False compares equal to 0.)
True == 0 #The output will be 'False'.
True == 1 #The output will be 'True'.
| StarcoderdataPython |
1773605 | <filename>__main__.py
import tkinter as tk
import os, sys, time, ctypes
from scipy.special import gamma
from mathgraph3D.core.global_imports import *
from mathgraph3D.core.Color import ColorStyle, Styles, Gradient, preset_styles, random_color
from mathgraph3D.core.functions.CartesianFunctions import Function2D, Function3D
from mathgraph3D.core.functions.ParametricFunctions import ParametricFunctionT, ParametricFunctionUV, RevolutionSurface
from mathgraph3D.core.functions.VectorFunctions import VectorField
from mathgraph3D.core.functions.StatisticalPlots import StatPlot2D, StatPlot3D
from mathgraph3D.core.functions.OtherCoordinateSystems import CylindricalFunction, SphericalFunction, PolarFunction
from mathgraph3D.core.functions.ImplicitPlots import ImplicitPlot2D, ImplicitSurface
from mathgraph3D.core.functions.ComplexFunctions import ComplexFunction
from mathgraph3D.core.functions.RecurrenceRelation import RecurrenceRelation
from mathgraph3D.core.plot.ClippingPlane import ClippingPlane
from mathgraph3D.core.plot.Plot import Plot
from mathgraph3D.gui.GUI import Interface
# Rotation step applied to the view angles per arrow-key press.
ALPHA_INCREMENT, BETA_INCREMENT = 0.1, 0.1;
# View angles restored by the 'r' (reset) key.
INITIAL_ALPHA, INITIAL_BETA = 0.5, 0.8;
# Amount passed to plot.zoom() per 'i'/'o' key press.
ZOOM_FACTOR = 20;

# Scratch mapping for inspecting live objects (main() stores the Plot here).
debug_dict = {};

#print(ctypes.windll.user32.GetSystemMetrics(0), ctypes.windll.user32.GetSystemMetrics(1));
def on_close():
    """Tk close handler: stop the loop and destroy the window.

    NOTE(review): main() defines and registers its own local on_close, so
    this module-level copy appears unused; 'root' and 'running' are only
    bound inside main(), so calling this one at module scope would raise
    NameError -- confirm before removing.
    """
    global running;
    running = False;
    root.destroy();
def main():
    """Create the MathGraph 3D window (optionally embedded in a Tk GUI),
    build the Plot, and run the pygame event loop until the window closes.

    NOTE(review): source indentation was reconstructed during review; the
    placement of the pygame.quit() inside the except block (unreachable
    after the bare raise) should be confirmed against the original file.
    """
    WIDTH, HEIGHT = 683, 600
    GUI = True       # embed the pygame surface inside the Tk interface
    TESTING = False  # record loop timings via performance_testing when True

    def on_close():
        # Local Tk close handler; shadows the module-level on_close.
        nonlocal running
        running = False
        root.destroy()

    if GUI:
        # Half the screen width for the plot surface, full screen height.
        WIDTH = ctypes.windll.user32.GetSystemMetrics(0) // 2;
        HEIGHT = ctypes.windll.user32.GetSystemMetrics(1);
        root = tk.Tk();
        root.config(background="#ddddff");
        root.state("zoomed");
        embed = Interface(root, width=WIDTH, height=HEIGHT);
        embed.grid(row=0, column=0, rowspan=6, padx=10);
        # Point SDL at the Tk frame so pygame renders inside the GUI.
        os.environ["SDL_WINDOWID"] = str(embed.winfo_id());
        root.protocol("WM_DELETE_WINDOW", on_close);
        root.update();
    else:
        # Standalone pygame window at a fixed desktop position.
        os.environ["SDL_VIDEO_WINDOW_POS"] = "600,50"

    loops, total_time = 0, 0;
    pygame.init();
    screen = pygame.display.set_mode((WIDTH, HEIGHT), pygame.RESIZABLE);
    pygame.display.set_caption("MathGraph 3D");
    pygame.key.set_repeat(100, 50);
    clock = pygame.time.Clock();
    running = True;

    if GUI:
        plot = Plot(screen, gui=embed);
        embed.set_plot(plot);
    else:
        plot = Plot(screen, axes_on=False, angles_on=False, labels_on=False, tracker_on=False, spin=False, line_numbers=True,
                    x_start=-16, x_stop=16, y_start=-16, y_stop=16, z_start=-16, z_stop=16, ticks=True, alpha=2.75, beta=0.35);
    debug_dict["plot"] = plot;

    # --- Library of example plots kept for manual experimentation. ---
##    plot.add_clipping_plane(ClippingPlane(2, 3, 1, 0, 0, 0))
##    f_x = lambda u, v: (3+sin(v)+cos(u))*cos(2*v)
##    f_y = lambda u, v: (3+sin(v)+cos(u))*sin(2*v)
##    f_z = lambda u, v: sin(u)+2*cos(v)
##    ParametricFunctionUV(plot, lambda u, v: (f_x(u, v), f_y(u, v), f_z(u, v)), u_start=-math.pi, u_stop=math.pi, v_start=-math.pi, v_stop=math.pi, mesh_on=True, color_style=ColorStyle(Styles.INVNORM), u_anchors=120, v_anchors=30);
##    CylindricalFunction(plot, lambda z, t: t/z, color_style=ColorStyle(Styles.GRADIENT, color1=(200, 100, 100), color2=(100, 100, 200)), z_anchors=70, mesh_on=False);
##    Function3D(plot, lambda x, y: 2*(sin(x)+sin(y)), color_style=ColorStyle(Styles.CHECKERBOARD, color1=(200, 0, 50), color2=(255, 0, 255)));
##    Function3D(plot, lambda x, y: sin(math.sqrt(x**2+y**2))-1, color_style=ColorStyle(Styles.SOLID, color=(255, 255, 255), apply_lighting=True, light_source=(0, 0, 4)), x_anchors=220, y_anchors=220, mesh_on=False);
##    RevolutionSurface(plot, lambda x: x, surf_on=True);
##    RecurrenceRelation(plot, lambda last: 2*last*(1-last), seed_value=0.75, unit_scale=1);
##    PolarFunction(plot, lambda theta: 4);
##    Function3D(plot, lambda x, y: (y*sin(x) + x*cos(y))/2, color_style=ColorStyle(Styles.CHECKERBOARD, color1=(100, 100, 255), color2=(150, 255, 150), apply_lighting=True, light_source=(0, 0, 6)));
##    ImplicitPlot2D(plot, lambda x, y: y*sin(x) + x*cos(y) - 1, color=(0, 128, 255));
##    ImplicitPlot2D(plot, lambda x, y: -(y*sin(x) + x*cos(y)) - 1, color=(255, 128, 0));
##    ImplicitPlot2D(plot, lambda x, y: sqrt(sin(x**2+y**2))+1, line_weight=2, squares_x=10, squares_y=10);
##    function = lambda x, y: sin(sin(x)+sin(y));
##    Function3D(plot, function, color_style=ColorStyle(Styles.SOLID, color=(228, 228, 255), apply_lighting=True, light_source=(0, 0, 6)));
##    VectorField.slope_field_of(plot, function, vecs_per_unit=2);
##    ComplexFunction(plot, lambda z: complex(cmath.sqrt(z).imag, cmath.sqrt(z).real));
##    ComplexFunction(plot, cmath.atan);
##    ComplexFunction(plot, lambda z: cmath.sin(z)+cmath.cos(z));
##    function = plot.plane_from_3_points((2, 7, -3), (1, -1, 0), (6, -2, 4));
##    Function3D(plot, function, color_style=ColorStyle(Styles.SOLID, color=(200, 200, 255)));
##    VectorField.slope_field_of(plot, function, vecs_per_unit=2);
##    ComplexFunction(plot, cmath.exp, mesh_on=False, real_anchors=64, imag_anchors=64);
##    ComplexFunction(plot, cmath.asin, mesh_on=True, real_anchors=32, imag_anchors=32, detection=True);
##    ComplexFunction(plot, cmath.sin);
##    func = lambda x, y: cos(x**2+y);
##    tangent = plot.tangent_plane(func, 0, 1);
##    plot.add_point((0, 1, func(0, 1)));
##    Function3D(plot, func, color_style=preset_styles["cool-blue"]);
##    Function3D(plot, tangent, color_style=ColorStyle(Styles.SOLID, color=(225, 225, 255)));
##    Function3D(plot, lambda x, y: x**2-y**2, color_style=ColorStyle(Styles.VERTICAL_STRIPED, color1=(0, 0, 0), color2=(255, 255, 255), apply_lighting=True, light_source=(0, 0, 6)));
##    Function3D(plot, lambda x, y: (x*x+y*y)/4);
##    Function3D(plot, lambda x, y: cos(2*pi*(1.1*x-y))+cos(2*pi*(1.2*x-y))+cos(2*pi*(1.3*x-y))+cos(2*pi*(1.4*x-y))+cos(2*pi*(1.5*x-y))+cos(2*pi*(1.6*x-y))+cos(2*pi*(1.7*x-y))+cos(2*pi*(1.8*x-y))+cos(2*pi*(1.9*x-y))+cos(2*pi*(2*x-y)),
##               x_anchors=100, y_anchors=100, color_style=ColorStyle(Styles.INVNORM), mesh_on=False)
##    ImplicitSurface(plot, lambda x, y, z: x*y*z, lambda x, y, z: 0, color_style=ColorStyle(Styles.INVNORM), cubes_per_axis=30)
##    Function3D(plot, lambda x, y: x+y*y/10 + 15*(sin(x)*sin(y))**4, color_style=ColorStyle(Styles.NORMAL_VECTOR), x_anchors=50, y_anchors=50, mesh_on=False)

    while running:
        initial_time = time.time();
        try:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False;
                    break;
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_r:
                        # Reset the view to the default orientation.
                        plot.set_alpha(INITIAL_ALPHA);
                        plot.set_beta(INITIAL_BETA);
                    elif event.key == pygame.K_LEFT:
                        plot.increment_alpha(-ALPHA_INCREMENT);
                    elif event.key == pygame.K_RIGHT:
                        plot.increment_alpha(ALPHA_INCREMENT);
                    elif event.key == pygame.K_UP:
                        plot.increment_beta(-BETA_INCREMENT);
                    elif event.key == pygame.K_DOWN:
                        plot.increment_beta(BETA_INCREMENT);
                    elif event.key == pygame.K_i:
                        plot.zoom(ZOOM_FACTOR);
                    elif event.key == pygame.K_o:
                        plot.zoom(-ZOOM_FACTOR);
                        plot.needs_update = True;
                    elif not GUI and event.key == pygame.K_RETURN:
                        # Screenshot hotkey (standalone mode only); prompts on stdin.
                        pygame.image.save(screen, "C:\\Users\\sam\\Desktop\\3D Plots\\{}.png".format(input("name (no extension) > ")));
                elif event.type == pygame.MOUSEMOTION:
                    if pygame.mouse.get_pressed()[0]:
                        # Drag with the left button to rotate the view.
                        plot.increment_alpha(event.rel[0] / 160);
                        plot.increment_beta(event.rel[1] / 320);
                elif event.type == pygame.VIDEORESIZE:
                    # Rebuild the surface and keep the Plot's center in sync.
                    WIDTH, HEIGHT = event.w, event.h;
                    screen = pygame.display.set_mode((WIDTH, HEIGHT), pygame.RESIZABLE);
                    plot.surface = screen;
                    plot.s_width, plot.s_height = screen.get_width()//2, screen.get_height()//2;
                    plot.needs_update = True;

            plot.update();
            clock.tick(50);
            if running and GUI: root.update();
        except Exception as e:
            raise;
            pygame.quit();  # NOTE(review): unreachable after the bare raise above.
        total_time += time.time() - initial_time;
        loops += 1;

    pygame.quit();
    if TESTING:
        msg = "In __main__: old projection (scaling done for every point)";
        from performance_testing import record;
        record(
            {
                "description": msg,
                "total time": plot.time,
                "total updates": plot.updates,
                "average update time": plot.get_average_update_time(),
                "average event loop time": loops / total_time
            }
        );
# Launch the interactive plot window only when executed as a script.
if __name__ == "__main__":
    main();
| StarcoderdataPython |
1718913 | import json
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from solc import compile_files
from solc.utils.string import force_bytes
from web3.utils.validation import validate_address
from jobboard import utils
class MemberInterface:
    def __init__(self, contract_address):
        """Bind a web3 wrapper for the Member contract at *contract_address*.

        Loads the contract ABI from the cached JSON file under
        settings.ABI_PATH; on a cache miss it compiles
        dapp/contracts/Member.sol with solc, writes the cache file, and
        uses the freshly compiled ABI.
        """
        self.web3 = utils.get_w3()
        # All transactions are sent from the site-wide coinbase account.
        self.account = settings.WEB_ETH_COINBASE
        self.contract_address = contract_address
        try:
            with open(settings.ABI_PATH + 'Member.abi.json', 'r') as ad:
                self.abi = json.load(ad)
        except FileNotFoundError:
            # Cache miss: compile the Solidity source and persist the ABI.
            path = 'dapp/contracts/Member.sol'
            compiled = compile_files([path, ],
                                     output_values=("abi", "ast", "bin", "bin-runtime",))
            with open(settings.ABI_PATH + 'Member.abi.json', 'w+') as ad:
                ad.write(json.dumps(compiled[path + ':Member']['abi']))
            self.abi = compiled[path + ':Member']['abi']
        # Password used to unlock the coinbase account around each transaction.
        self.__password = settings.COINBASE_PASSWORD_SECRET
        self.contract = self.web3.eth.contract(abi=self.abi, address=self.contract_address)
def trim0x(self, text):
return text.rstrip('\x00')
    def unlockAccount(self):
        """Unlock the coinbase account with the stored password (personal API)."""
        return self.web3.personal.unlockAccount(self.account, self.__password)
    def lockAccount(self):
        """Re-lock the coinbase account after a transaction has been sent."""
        return self.web3.personal.lockAccount(self.account)
def new_fact(self, member_about_address, fact, fact_uuid):
validate_address(member_about_address)
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).new_fact(member_about_address,
json.dumps(fact, cls=DjangoJSONEncoder),
fact_uuid)
self.lockAccount()
return txn_hash
def approve_company_tokens(self, company_address, amount):
validate_address(company_address)
self.unlockAccount()
self.contract.transact({'from': self.account}).approve_company_tokens(company_address, 0)
txn_hash = self.contract.transact({'from': self.account}).approve_company_tokens(company_address, amount)
self.lockAccount()
return txn_hash
def new_vacancy(self, company_address, uuid, allowed):
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).new_vacancy(company_address, uuid, allowed)
self.lockAccount()
return txn_hash
def disable_vac(self, company_address, uuid):
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).disable_vac(company_address, uuid)
self.lockAccount()
return txn_hash
def enable_vac(self, company_address, uuid):
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).enable_vac(company_address, uuid)
self.lockAccount()
return txn_hash
def new_company_owner(self, company_address, member_address):
validate_address(company_address)
validate_address(member_address)
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).new_owner_member(company_address, member_address)
self.lockAccount()
return txn_hash
def new_company_collaborator(self, company_address, member_address):
validate_address(company_address)
validate_address(member_address)
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).new_collaborator_member(company_address,
member_address)
self.lockAccount()
return txn_hash
def new_company_member(self, company_address, member_address):
validate_address(company_address)
validate_address(member_address)
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).new_member(company_address,
member_address)
self.lockAccount()
return txn_hash
def new_action(self, company_address, vac_uuid, title, fee, appr):
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).new_vacancy_pipeline_action(company_address,
vac_uuid,
force_bytes(title),
int(float(
fee)) * 10 ** 18,
appr)
self.lockAccount()
return txn_hash
def del_collaborator_member(self, company_address, member_address):
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).del_collaborator_member(company_address,
member_address)
self.lockAccount()
return txn_hash
def del_owner_member(self, company_address, member_address):
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).del_owner_member(company_address,
member_address)
self.lockAccount()
return txn_hash
def subscribe(self, company_address, vac_uuid):
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).subscribe(company_address,
vac_uuid)
self.lockAccount()
return txn_hash
def approve_level_up(self, company_address, vac_uuid, member_address):
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).approve_level_up(company_address, vac_uuid,
member_address)
self.lockAccount()
return txn_hash
def reset_candidate_action(self, company_address, vac_uuid, member_address):
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).reset_member_action(company_address, vac_uuid,
member_address)
self.lockAccount()
return txn_hash
def change_status(self, status):
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).change_status(status)
self.lockAccount()
return txn_hash
def verify_fact(self, member_address, fact_uuid):
validate_address(member_address)
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).verify_fact(member_address, fact_uuid)
self.lockAccount()
return txn_hash
def change_action(self, company_address, vac_uuid, index, title, fee, appr):
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).change_vacancy_pipeline_action(company_address,
vac_uuid,
index,
force_bytes(title),
int(float(
fee)) * 10 ** 18,
appr)
self.lockAccount()
return txn_hash
def delete_action(self, company_address, vac_uuid, index):
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).delete_vacancy_pipeline_action(company_address,
vac_uuid,
index)
self.lockAccount()
return txn_hash
def change_vacancy_allowance_amount(self, company_address, vac_uuid, allowed):
self.unlockAccount()
txn_hash = self.contract.transact({'from': self.account}).change_vacancy_allowance_amount(company_address,
vac_uuid, allowed)
self.lockAccount()
return txn_hash
# ---- StarcoderdataPython file boundary ----
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import json
import os
import codecs
from collections import Counter
import numpy as np
import tensorflow as tf
from parser.structs.vocabs.base_vocabs import CountVocab
from parser.structs.vocabs.token_vocabs import TokenVocab,GraphTokenVocab
from parser.structs.vocabs.index_vocabs import IndexVocab,GraphIndexVocab
from parser.structs.vocabs.second_order_vocab import GraphSecondIndexVocab
from parser.structs.vocabs.pointer_generator import PointerGenerator
from . import mrp_vocabs as mv
from parser.neural import nn, nonlin, embeddings, classifiers, recurrent
import sys
sys.path.append('./THUMT')
import thumt.layers as layers
from thumt.models.rnnsearch import _decoder as seq2seq_decoder
# from THUMT.thumt.models.rnnsearch import _decoder as seq2seq_decoder
import pdb
class RNNDecoderVocab(TokenVocab):
  """Token vocab that also drives an RNN (GRU-based) seq2seq decoder.

  Counting is done over node fields of an MRP JSON file; `forward` wires the
  encoder outputs into THUMT's `seq2seq_decoder`.
  """
  #_save_str = 'tokens'
  #=============================================================
  def __init__(self, *args, **kwargs):
    """Initialise with a 2-D [batch, time] placeholder unless overridden."""
    if 'placeholder_shape' not in kwargs:
      kwargs['placeholder_shape'] = [None, None]
    super(RNNDecoderVocab, self).__init__(*args, **kwargs)
    return

  def forward(self, layers, decoder_embeddings, sentence_feat, token_weights, sequence_length, input_feed=None, target_copy_hidden_states=None, coverage=None,\
        variable_scope=None, reuse=False, debug=False):
    """
    decoder embeddings [batch_size, decoder_seq_length, embedding_size]
    layers: outputs of BiLSTM [batch_size, seq_length, hidden_size]
    sentence_feat: the final output state of RNN [num_encoder_layers, batch_size, hidden_size]
    token_weights: mask
    input_feed: None or [batch_size, 1, hidden_size]
    target_copy_hidden_states: None or [batch_size, seq_length, hidden_size]
    coverage: None or [batch_size, 1, encode_seq_length]
    """
    # NOTE(review): token_weights, input_feed, target_copy_hidden_states and
    # coverage are accepted but never used here — confirm intent.
    # NOTE(review): self.cell is only defined by subclasses (e.g.
    # Seq2SeqDecoderVocab.__init__) — calling forward on this base class
    # directly would raise AttributeError.
    #pdb.set_trace()
    with tf.variable_scope('Seq2SeqDecoder'):
      # Project encoder state, memory and decoder embeddings into the
      # decoder's recurrent size before running the THUMT decoder.
      with tf.variable_scope('linear'):
        sentence_feat = classifiers.hidden(sentence_feat, self.recur_size,hidden_func=self.hidden_func,hidden_keep_prob=self.hidden_keep_prob)
      with tf.variable_scope('memory_linear'):
        layers = classifiers.hidden(layers, self.recur_size,hidden_func=self.hidden_func,hidden_keep_prob=self.hidden_keep_prob)
      with tf.variable_scope('embedding_linear'):
        decoder_embeddings = classifiers.hidden(decoder_embeddings, self.recur_size,hidden_func=self.hidden_func,hidden_keep_prob=self.hidden_keep_prob)
      result = seq2seq_decoder(self.cell,decoder_embeddings,layers,sequence_length,sentence_feat)
    return result

  def count(self, mrp):
    """Count `self.field` over every node of every sentence in the MRP file."""
    # pdb.set_trace()
    mrp_file=json.load(open(mrp))
    for sentence_id in mrp_file:
      for current_data in mrp_file[sentence_id]['nodes']:
        token = current_data[self.field]
        self._count(token)
    self.index_by_counts()
    return True

  def count_mrp(self, mrp):
    """No-op counterpart kept for interface compatibility."""
    return True

  def _count(self, token):
    # Lower-case first when the vocab is configured as case-insensitive.
    if not self.cased:
      token = token.lower()
    self.counts[token] += 1
    return

  def get_root(self):
    """Index used for the artificial root token."""
    return 0

  def add_sequence(self,tokens):
    # Empty strings are mapped to index 0; everything else passes through
    # unchanged (tokens are assumed to already be indices — TODO confirm).
    indices=[x if x!='' else 0 for x in tokens]
    return indices

  @property
  def recur_size(self):
    # Decoder hidden size, read from the config section for this vocab.
    return self._config.getint(self, 'recur_size')

  @property
  def get_nodes_path(self):
    return self._config.get('BaseNetwork', 'nodes_path')
class Seq2SeqIDVocab(RNNDecoderVocab, mv.NodeIDVocab):
  """Vocab for node IDs fed to the seq2seq decoder."""

  def set_placeholders(self, indices, feed_dict=None):
    """Map `indices` to this vocab's placeholder and return the feed dict.

    A fresh dict is created when `feed_dict` is omitted.  The previous
    signature used a mutable default (`feed_dict={}`) that was shared
    across calls, so placeholder entries from earlier calls silently
    leaked into later ones.
    """
    if feed_dict is None:
      feed_dict = {}
    feed_dict[self.placeholder] = indices
    return feed_dict

  #=============================================================
  def get_bos(self):
    """Begin-of-sequence index."""
    return 0

  #=============================================================
  def get_eos(self):
    """End-of-sequence index."""
    return 0
class Seq2SeqNodeLabelPredictionVocab(TokenVocab, mv.LabelVocab):
  """Target-side node-label vocab with a pointer/generator head.

  `forward` runs the pointer-generator over decoder hidden states and
  computes its loss; `decode` looks like unported PyTorch code (see the
  review note on that method).
  """
  def __init__(self, *args, **kwargs):
    """Initialise with a 2-D [batch, time] placeholder."""
    kwargs['placeholder_shape'] = [None, None]
    super(Seq2SeqNodeLabelPredictionVocab, self).__init__(*args, **kwargs)
    return
  #=============================================================
  def get_bos(self):
    """Begin-of-sequence token."""
    return '<BOS>'
  #=============================================================
  def get_eos(self):
    """End-of-sequence token."""
    return '<EOS>'

  def forward(self, hiddens, source_attentions, target_attentions, pointer_generator_inputs, invalid_indexes=None,\
        variable_scope=None, reuse=False, debug=False):
    """
    Compute a distribution over the target dictionary
    extended by the dynamic dictionary implied by copying target nodes.

    :param hiddens: decoder outputs, [batch_size, num_target_nodes, hidden_size]
    :param source_attentions: attention of each source node,
        [batch_size, num_target_nodes, num_source_nodes]
    :param source_attention_maps: a sparse indicator matrix
        mapping each source node to its index in the dynamic vocabulary.
        [batch_size, num_source_nodes, dynamic_vocab_size]
    :param target_attentions: attention of each target node,
        [batch_size, num_target_nodes, num_target_nodes]
    :param target_attention_maps: a sparse indicator matrix
        mapping each target node to its index in the dynamic vocabulary.
        [batch_size, num_target_nodes, dynamic_vocab_size]
    :param invalid_indexes: indexes which are not considered in prediction.
    """
    #pdb.set_trace()
    # target=self.placeholder['vocab_targets']
    # copy_targets=self.placeholder['copy_targets']
    # coref_targets=self.placeholder['coref_targets']
    with tf.variable_scope('Seq2SeqNodeLabelPredictionVocab'):
      source_attention_maps=pointer_generator_inputs['SrcCopyMap']
      # Targets/maps are shifted by one position to drop the BOS slot.
      target_attention_maps=pointer_generator_inputs['TgtCopyMap'][:,1:]

      outputs=self.predictor.forward(hiddens, source_attentions, source_attention_maps, target_attentions, target_attention_maps, invalid_indexes=None,debug=debug)
      copy_targets=pointer_generator_inputs['SrcCopyIndices'][:,1:]
      coref_targets=pointer_generator_inputs['TgtCopyIndices'][:,1:]
      # pdb.set_trace()
      loss_outputs = self.predictor.compute_loss(outputs['probabilities'],outputs['predictions'],self.placeholder,copy_targets,outputs['source_dynamic_vocab_size'],coref_targets,outputs['source_dynamic_vocab_size'],None,target_attentions,debug=debug)
      outputs.update(loss_outputs)
      # Scale the generator loss by this vocab's interpolation weight.
      outputs['loss'] = outputs['loss']*self.loss_interpolation
      # outputs['loss']=tf.zeros(1,tf.float32)[0]
      # outputs['n_correct_tokens']=tf.zeros(1,tf.float32)[0]
      # outputs['n_correct_sequences'] = tf.zeros(1,tf.float32)[0]
    return outputs

  def decode(self, memory_bank, mask, states, copy_attention_maps, copy_vocabs, tag_luts, invalid_indexes, decoder_inputs):
    # NOTE(review): this method appears to be dead/unported code.  It mixes
    # PyTorch calls (`torch.ones`, `torch.cat`, `memory_bank.new_zeros`) and
    # an undefined name (`tt.ones`) into this TensorFlow codebase, and it
    # references attributes (`self.max_decode_length`, `self.use_coverage`,
    # `self.decoder`, `self.generator`, `self.index`) not defined in this
    # class.  Calling it will raise NameError — confirm before relying on it.
    # [batch_size, 1]
    batch_size = tf.shape(memory_bank)[0]
    tokens = tt.ones([batch_size, 1]) * self.index('<BOS>')
    pos_tags = torch.ones(batch_size, 1) * self.index('<EOS>')
    corefs = torch.zeros(batch_size, 1)

    decoder_input_history = []
    decoder_outputs = []
    rnn_outputs = []
    copy_attentions = []
    coref_attentions = []
    predictions = []
    coref_indexes = []
    decoder_mask = []

    input_feed = None
    coref_inputs = []

    # A sparse indicator matrix mapping each node to its index in the dynamic vocab.
    # Here the maximum size of the dynamic vocab is just max_decode_length.
    coref_attention_maps = tf.cast(tf.zeros([batch_size, self.max_decode_length, self.max_decode_length + 1]), tf.float32)
    # A matrix D where the element D_{ij} is for instance i the real vocab index of
    # the generated node at the decoding step `i'.
    coref_vocab_maps = tf.zeros([batch_size, self.max_decode_length + 1])

    coverage = None
    if self.use_coverage:
      coverage = memory_bank.new_zeros(batch_size, 1, memory_bank.size(1))

    for step_i in range(self.max_decode_length):
      # 2. Decode one step.
      decoder_output_dict = self.decoder(
        decoder_inputs, memory_bank, mask, states, input_feed, coref_inputs, coverage)
      _decoder_outputs = decoder_output_dict['decoder_hidden_states']
      _rnn_outputs = decoder_output_dict['rnn_hidden_states']
      _copy_attentions = decoder_output_dict['source_copy_attentions']
      _coref_attentions = decoder_output_dict['target_copy_attentions']
      states = decoder_output_dict['last_hidden_state']
      input_feed = decoder_output_dict['input_feed']
      coverage = decoder_output_dict['coverage']

      # 3. Run pointer/generator.
      if step_i == 0:
        _coref_attention_maps = coref_attention_maps[:, :step_i + 1]
      else:
        _coref_attention_maps = coref_attention_maps[:, :step_i]

      generator_output = self.generator(
        _decoder_outputs, _copy_attentions, copy_attention_maps,
        _coref_attentions, _coref_attention_maps, invalid_indexes)
      _predictions = generator_output['predictions']

      # 4. Update maps and get the next token input.
      tokens, _predictions, pos_tags, corefs, _mask = self._update_maps_and_get_next_input(
        step_i,
        generator_output['predictions'].squeeze(1),
        generator_output['source_dynamic_vocab_size'],
        coref_attention_maps,
        coref_vocab_maps,
        copy_vocabs,
        decoder_mask,
        tag_luts,
        invalid_indexes
      )

      # 5. Update variables.
      decoder_input_history += [decoder_inputs]
      decoder_outputs += [_decoder_outputs]
      rnn_outputs += [_rnn_outputs]

      copy_attentions += [_copy_attentions]
      coref_attentions += [_coref_attentions]

      predictions += [_predictions]
      # Add the coref info for the next input.
      coref_indexes += [corefs]
      # Add the mask for the next input.
      decoder_mask += [_mask]

    # 6. Do the following chunking for the graph decoding input.
    # Exclude the hidden state for BOS.
    decoder_input_history = torch.cat(decoder_input_history[1:], dim=1)
    decoder_outputs = torch.cat(decoder_outputs[1:], dim=1)
    rnn_outputs = torch.cat(rnn_outputs[1:], dim=1)
    # Exclude coref/mask for EOS.
    # TODO: Answer "What if the last one is not EOS?"
    predictions = torch.cat(predictions[:-1], dim=1)
    coref_indexes = torch.cat(coref_indexes[:-1], dim=1)
    decoder_mask = 1 - torch.cat(decoder_mask[:-1], dim=1)

    return dict(
      # [batch_size, max_decode_length]
      predictions=predictions,
      coref_indexes=coref_indexes,
      decoder_mask=decoder_mask,
      # [batch_size, max_decode_length, hidden_size]
      decoder_inputs=decoder_input_history,
      decoder_memory_bank=decoder_outputs,
      decoder_rnn_memory_bank=rnn_outputs,
      # [batch_size, max_decode_length, encoder_length]
      copy_attentions=copy_attentions,
      coref_attentions=coref_attentions
    )
class Seq2SeqSrcCopyMapVocab(RNNDecoderVocab, mv.SrcCopyMapVocab):
  """Source-copy map vocab: 3-D placeholder [batch, source_nodes, dyn_vocab]."""
  def __init__(self, *args, **kwargs):
    """Set the depth marker and a 3-D placeholder shape, then delegate."""
    self._depth=-2
    kwargs['placeholder_shape'] = [None, None, None]
    super(Seq2SeqSrcCopyMapVocab, self).__init__(*args, **kwargs)
    return
class Seq2SeqTgtCopyMapVocab(RNNDecoderVocab, mv.TgtCopyMapVocab):
  """Target-copy map vocab: 3-D placeholder [batch, target_nodes, dyn_vocab]."""
  def __init__(self, *args, **kwargs):
    """Set the depth marker and a 3-D placeholder shape, then delegate."""
    self._depth=-2
    kwargs['placeholder_shape'] = [None, None, None]
    super(Seq2SeqTgtCopyMapVocab, self).__init__(*args, **kwargs)
    return
class Seq2SeqSrcCopyIndicesVocab(RNNDecoderVocab, mv.SrcCopyIndicesVocab):
  """Source-copy index vocab: 2-D placeholder [batch, time]."""
  def __init__(self, *args, **kwargs):
    """Force a 2-D placeholder shape, then delegate."""
    kwargs['placeholder_shape'] = [None, None]
    super(Seq2SeqSrcCopyIndicesVocab, self).__init__(*args, **kwargs)
    return
class Seq2SeqTgtCopyIndicesVocab(RNNDecoderVocab, mv.TgtCopyIndicesVocab):
  """Target-copy index vocab: 2-D placeholder [batch, time]."""
  def __init__(self, *args, **kwargs):
    """Force a 2-D placeholder shape, then delegate."""
    kwargs['placeholder_shape'] = [None, None]
    super(Seq2SeqTgtCopyIndicesVocab, self).__init__(*args, **kwargs)
    return
class Seq2SeqDecoderVocab(RNNDecoderVocab, mv.WordVocab):
  """Word vocab that owns the decoder GRU cell.

  NOTE(review): `forward` is an exact duplicate of RNNDecoderVocab.forward
  and could simply be inherited — confirm before removing.
  """
  def __init__(self, *args, **kwargs):
    """Force a 2-D placeholder shape and build the THUMT GRU cell."""
    kwargs['placeholder_shape'] = [None, None]
    super(Seq2SeqDecoderVocab, self).__init__(*args, **kwargs)
    self.cell = layers.rnn_cell.LegacyGRUCell(self.recur_size)
    # self.predictor = PointerGenerator(self, input_size, switch_input_size, vocab_size, vocab_pad_idx, force_copy)
    return

  #=============================================================
  def get_bos(self):
    """Begin-of-sequence index."""
    return 0
  #=============================================================
  def get_eos(self):
    """End-of-sequence index."""
    return 0

  def forward(self, layers, decoder_embeddings, sentence_feat, token_weights, sequence_length, input_feed=None, target_copy_hidden_states=None, coverage=None,\
        variable_scope=None, reuse=False, debug=False):
    """
    decoder embeddings [batch_size, decoder_seq_length, embedding_size]
    layers: outputs of BiLSTM [batch_size, seq_length, hidden_size]
    sentence_feat: the final output state of RNN [num_encoder_layers, batch_size, hidden_size]
    token_weights: mask
    input_feed: None or [batch_size, 1, hidden_size]
    target_copy_hidden_states: None or [batch_size, seq_length, hidden_size]
    coverage: None or [batch_size, 1, encode_seq_length]
    """
    # NOTE(review): token_weights, input_feed, target_copy_hidden_states and
    # coverage are accepted but never used here — confirm intent.
    with tf.variable_scope('Seq2SeqDecoder'):
      # Project encoder state, memory and embeddings to the recurrent size.
      with tf.variable_scope('linear'):
        sentence_feat = classifiers.hidden(sentence_feat, self.recur_size,hidden_func=self.hidden_func,hidden_keep_prob=self.hidden_keep_prob)
      with tf.variable_scope('memory_linear'):
        layers = classifiers.hidden(layers, self.recur_size,hidden_func=self.hidden_func,hidden_keep_prob=self.hidden_keep_prob)
      with tf.variable_scope('embedding_linear'):
        decoder_embeddings = classifiers.hidden(decoder_embeddings, self.recur_size,hidden_func=self.hidden_func,hidden_keep_prob=self.hidden_keep_prob)
      result = seq2seq_decoder(self.cell,decoder_embeddings,layers,sequence_length,sentence_feat)
    return result
class Seq2SeqAnchorPredictionVocab(RNNDecoderVocab, mv.AnchorVocab):
  """Anchor-prediction vocab; inherits all behaviour from its bases."""
  pass
class Seq2SeqGraphTokenVocab(GraphTokenVocab, mv.SemrelVocab):
  """Edge-label (semantic relation) vocab.

  Node fields are edge strings formatted as `head:rel|head:rel|...`;
  only the `rel` part of each edge is counted.
  """
  def count(self, mrp):
    """Count edge labels over every node of every sentence in the MRP file."""
    # pdb.set_trace()
    mrp_file=json.load(open(mrp))
    for sentence_id in mrp_file:
      for current_data in mrp_file[sentence_id]['nodes']:
        token = current_data[self.field]
        self._count(token)
    self.index_by_counts()
    return True

  def _count(self, node):
    # '_' and '' mark nodes without edges; everything else is `head:rel|...`.
    if node not in ('_', ''):
      node = node.split('|')
      for edge in node:
        edge = edge.split(':', 1)
        # NOTE(review): raises ValueError if an edge has no ':' — assumed
        # well-formed input; confirm upstream guarantees this.
        head, rel = edge
        self.counts[rel] += 1
    return

  #=============================================================
  def get_bos(self):
    """Begin-of-sequence marker (no edges)."""
    return '_'
  #=============================================================
  def get_eos(self):
    """End-of-sequence marker (no edges)."""
    return '_'

  #=============================================================
  # def add(self, token):
  #   """"""
  #   indices=self.index(token)
  #   indices=[(index[0]+1,index[1]) for index in indices]
  #   return indices
class Seq2SeqGraphIndexVocab(GraphIndexVocab, mv.SemheadVocab):
  """Edge-head index vocab; counts labels from `head:rel|...` edge strings.

  NOTE(review): `count`/`_count` are identical to Seq2SeqGraphTokenVocab's —
  a shared helper would avoid the triplication.
  """
  def count(self, mrp):
    """Count edge labels over every node of every sentence in the MRP file."""
    # pdb.set_trace()
    mrp_file=json.load(open(mrp))
    for sentence_id in mrp_file:
      for current_data in mrp_file[sentence_id]['nodes']:
        token = current_data[self.field]
        self._count(token)
    self.index_by_counts()
    return True

  def _count(self, node):
    # '_' and '' mark nodes without edges; everything else is `head:rel|...`.
    if node not in ('_', ''):
      node = node.split('|')
      for edge in node:
        edge = edge.split(':', 1)
        head, rel = edge
        self.counts[rel] += 1
    return
  # def add(self, token):
  #   """"""
  #   indices=self.index(token)
  #   indices=[index+1 for index in indices]
  #   return indices

  #=============================================================
  def get_bos(self):
    """Begin-of-sequence marker (no edges)."""
    return '_'
  #=============================================================
  def get_eos(self):
    """End-of-sequence marker (no edges)."""
    return '_'
class Seq2SeqSecondOrderGraphIndexVocab(GraphSecondIndexVocab, mv.SemheadVocab):
  """Second-order edge-head index vocab; same counting scheme as the
  first-order variant (labels from `head:rel|...` edge strings)."""
  def count(self, mrp):
    """Count edge labels over every node of every sentence in the MRP file."""
    # pdb.set_trace()
    mrp_file=json.load(open(mrp))
    for sentence_id in mrp_file:
      for current_data in mrp_file[sentence_id]['nodes']:
        token = current_data[self.field]
        self._count(token)
    self.index_by_counts()
    return True

  def _count(self, node):
    # '_' and '' mark nodes without edges; everything else is `head:rel|...`.
    if node not in ('_', ''):
      node = node.split('|')
      for edge in node:
        edge = edge.split(':', 1)
        head, rel = edge
        self.counts[rel] += 1
    return

  #=============================================================
  def get_bos(self):
    """Begin-of-sequence marker (no edges)."""
    return '_'
  #=============================================================
  def get_eos(self):
    """End-of-sequence marker (no edges)."""
    return '_'
  # def add(self, token):
  #   """"""
  #   indices=self.index(token)
  #   indices=[index+1 for index in indices]
  #   return indices
from __future__ import annotations
from typing import Dict, List, Optional
import yaml
from base.basic import Circuit
from base.block import IBlock
from templates.block import BlockType, BlockTemplate, BlockFactory
from templates.conn import ConnTemplate
__author__ = "<NAME>"
__copyright__ = "Copyright 2021"
class Literal(str):
    """A `str` subclass that PyYAML renders with block (`|`) scalar style."""
    @staticmethod
    def literal_presenter(dumper, data):
        """Represent `data` as a YAML literal block scalar."""
        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
    # end def

    @staticmethod
    def install():
        """Register the presenter so yaml.dump emits Literal values as blocks."""
        yaml.add_representer(Literal, Literal.literal_presenter)
    # end def
# end class
class CircuitFactory:
    """Builds Circuit instances from block/connection templates.

    Templates can be assembled programmatically (add_block/add_conn) or
    loaded from / stored to a YAML file, and then instantiated via inst().
    """
    # This class is necessary, since objects cannot get applied properties dynamically
    class InstHelper(object):
        pass
    # end class

    def __init__(self):
        self._blocks: List[BlockTemplate] = list()
        self._conns: List[ConnTemplate] = list()
        # NOTE(review): _n_in/_n_out are never used elsewhere in this class —
        # confirm whether they are still needed.
        self._n_in = None
        self._n_out = None
        self._version: str = "1.0"
        self._desc = None

        # Flexible specify functions for loading data from file
        self._read_funcs = list()
        self._read_funcs.append({"name": "meta", "func": self._read_meta})
        self._read_funcs.append({"name": "blocks", "func": self._read_blocks})
        self._read_funcs.append({"name": "conns", "func": self._read_conns})

        # Flexible specify functions for storing data to file
        self._write_funcs = list()
        self._write_funcs.append({"name": "meta", "func": self._write_meta})
        self._write_funcs.append({"name": "blocks", "func": self._write_blocks})
        self._write_funcs.append({"name": "conns", "func": self._write_conns})

        # Flexible specify functions for instantiating
        self._inst_funcs = list()
        self._inst_funcs.append({"name": "blocks", "func": self._inst_blocks})
        self._inst_funcs.append({"name": "conns", "func": self._inst_conns})
    # end def

    @property
    def blocks(self) -> List[BlockTemplate]:
        """The block templates currently held by the factory."""
        return self._blocks
    # end def

    @property
    def conns(self) -> List[ConnTemplate]:
        """The connection templates currently held by the factory."""
        return self._conns
    # end def

    def add_desc(self, desc: str):
        """Set the free-text description stored in the file's meta section."""
        self._desc = desc
    # end def

    def add_block(self, block: BlockTemplate) -> BlockTemplate:
        """Append a block template and return it (for chaining)."""
        self._blocks.append(block)

        return block
    # end def

    def add_conn(self, conn: ConnTemplate):
        """Append a connection template."""
        self._conns.append(conn)
    # end def

    def inst(self) -> Circuit:
        """Instantiate a Circuit from the current templates."""
        inst_obj = CircuitFactory.InstHelper()  # Helper object that allows the dynamic use of and sharing between instantiating functions
        inst_obj.circuit: Circuit = Circuit()

        self._do_inst(inst_obj)

        return inst_obj.circuit
    # end def

    def _do_inst(self, inst_obj: CircuitFactory.InstHelper) -> None:
        # Run every registered instantiation step in order (blocks, then conns).
        for f in self._inst_funcs:
            f["func"](inst_obj)
        # end for
    # end def

    def _inst_blocks(self, inst_obj: CircuitFactory.InstHelper):
        """Instantiate every block template and attach them to the helper."""
        bf = BlockFactory()

        inst_obj.blocks: List[Dict[str, Optional[IBlock]]] = list()

        for block in self._blocks:
            inst_obj.blocks.append({"id": block.id, "block": bf.inst(block, value=block.value, box_name=block.box_name)})
        # end for
    # end def

    def _inst_conns(self, inst_obj: CircuitFactory.InstHelper):
        """Wire up the instantiated blocks according to the conn templates."""
        for conn in self._conns:
            in_block = self._get_block_by_id(inst_obj, conn.in_block_id)
            out_block = self._get_block_by_id(inst_obj, conn.out_block_id)
            out_block.conn_to_prev_block(in_block, conn.in_block_pin, conn.out_block_pin)
        # end for
    # end def

    @staticmethod
    def _get_block_by_id(inst_obj: CircuitFactory.InstHelper, block_id: str):
        """Resolve a block id to an instantiated block.

        Ids "0"-"2" are reserved for the circuit's built-in point, drawer
        and size blocks; anything else is looked up among the instantiated
        templates.  Returns None when no block matches.
        """
        if hasattr(inst_obj, "circuit"):  # Avoid warnings
            if block_id == "0":
                return inst_obj.circuit.point

            elif block_id == "1":
                return inst_obj.circuit.drawer

            elif block_id == "2":
                return inst_obj.circuit.size
            # end if

            if hasattr(inst_obj, "blocks"):
                for b in inst_obj.blocks:
                    if b["id"] == block_id:
                        return b["block"]
                    # end if
                # end for
            # end if

        return None
    # end def

    def load(self, filename: str) -> CircuitFactory:
        """Load templates from a YAML file; returns self for chaining."""
        self._load(filename)

        return self
    # end def

    def _load(self, filename: str, name: Optional[str] = None) -> None:
        with open(filename) as f:
            # Reset potential previous data
            self._blocks = list()
            self._conns = list()
            # NOTE(review): _name is only ever assigned here (never declared
            # in __init__ nor read elsewhere) — confirm it is still needed.
            self._name = name

            docs = [doc for doc in yaml.load_all(f, Loader=yaml.FullLoader)]
            doc = docs[0]

            # Dispatch each top-level YAML section to its registered reader.
            for key, value in doc.items():
                for rf in self._read_funcs:
                    if rf["name"] == key:
                        rf["func"](value)
                        continue
                    # end if
                # end for
            # end for
        # end with
    # end def

    def _read_meta(self, value):
        """Read the meta section: version check plus description."""
        version = value.get("version")

        if version != self._version:
            print(f"Version of loaded {version} file does not correspond to the internal version {self._version}.")
        # end if

        self._desc = value.get("desc")
    # end def

    def _read_blocks(self, value):
        """Read the blocks section into BlockTemplate objects."""
        self._blocks = list()

        for block in value:
            name = block.get("name")
            type_ = BlockType(block.get("type"))
            n_in = block.get("n_in")
            n_out = block.get("n_out")
            id_ = block.get("id")
            # NOTE(review): this rebinding shadows the `value` parameter being
            # iterated (harmless here since the iterator is already bound,
            # but worth renaming).
            value = block.get("value")
            box_name = block.get("box_name")
            self._blocks.append(BlockTemplate(type_, n_in, n_out, id_, value, box_name, name=name))
        # end for
    # end def

    def _read_conns(self, value):
        """Read the conns section into ConnTemplate objects."""
        self._conns = list()

        for block in value:
            in_block_id = block.get("in_block_id")
            in_block_pin = block.get("in_block_pin")
            out_block_id = block.get("out_block_id")
            out_block_pin = block.get("out_block_pin")
            self._conns.append(ConnTemplate(in_block_id, in_block_pin, out_block_id, out_block_pin))
        # end for
    # end def

    def store(self, filename: str):
        """Serialise the current templates to a YAML file."""
        d = dict()

        for wf in self._write_funcs:
            wf["func"](d)
        # end for

        # write to file
        ###############
        with open(filename, 'w') as f:
            Literal.install()
            yaml.dump(d, f, sort_keys=False)
        # end with
    # end def

    def _write_meta(self, d: Dict):
        """Write version and description into the output dict."""
        block_name = "meta"
        meta = dict()  # meta section mapping

        meta["version"] = self._version
        # NOTE(review): Literal(None) yields the string "None" when no
        # description was set — confirm whether that is intended.
        meta["desc"] = Literal(self._desc)

        d[block_name] = meta
    # end def

    def _write_blocks(self, d: Dict):
        """Write the block templates into the output dict (if any)."""
        if self._blocks is not None and len(self._blocks) > 0:
            block_name = "blocks"
            blocks = list()  # List of blocks

            for block in self._blocks:
                _block = dict()

                _block["name"] = block.name
                _block["type"] = block.type.value
                _block["n_in"] = block.n_in
                _block["n_out"] = block.n_out
                _block["id"] = block.id

                # Optional fields are only emitted when present.
                if block.value is not None:
                    _block["value"] = block.value

                if block.box_name is not None:
                    _block["box_name"] = block.box_name

                blocks.append(_block)
            # end for

            d[block_name] = blocks
        # end if
    # end def

    def _write_conns(self, d: Dict):
        """Write the connection templates into the output dict (if any)."""
        if self._conns is not None and len(self._conns) > 0:
            block_name = "conns"
            conns = list()  # List of conns

            for conn in self._conns:
                _conn = dict()

                _conn["in_block_id"] = conn.in_block_id
                _conn["in_block_pin"] = conn.in_block_pin
                _conn["out_block_id"] = conn.out_block_id
                _conn["out_block_pin"] = conn.out_block_pin

                conns.append(_conn)
            # end for

            d[block_name] = conns
        # end if
    # end def
# end class
# ---- StarcoderdataPython file boundary ----
# gh_stars: 10-100
# -*- coding: UTF-8 -*-
################################################################################
#
# Copyright (c) 2021 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
The file used to evaluate the performance of CWatcher.
Authors: xiaocongxi(<EMAIL>)
Date: 2021/11/15 10:30:45
"""
import numpy as np
import paddle
from paddle.io import Dataset, DataLoader
import os
import argparse
from cwatcher import *
from tqdm import tqdm
def evaluate(reference_city, target_city, Encoder, Classifier, epoch_num):
    """
    Evaluate the trained model on the given city.
    Args:
        reference_city: the reference city on which the hyperparameters are chosen. (e.g. Shenzhen)
        target_city: the target city that you want to evaluate the model performance. (e.g. Huizhou)
        Encoder: the model structure of encoders. (e.g. EncoderShenzhen)
        Classifier: the model structure of classifer. (e.g. ClassifierShenzhen)
        epoch_num: the number of epoch that the model has been trained for. (e.g. 100)
    """
    Target_eval = City_Dataset(dataset_type='eval', city_name=target_city)
    Target_eval_loader = DataLoader(dataset=Target_eval)

    # load model param
    # Checkpoint directory is resolved relative to this script's location.
    root_path = os.path.dirname(os.path.realpath(__file__))
    save_path = root_path + '/../model/ref_' + reference_city + '_epoch' + str(epoch_num) + '/'
    encoder = Encoder()
    classifier = Classifier()
    encoder_state_dict = paddle.load(save_path + 'encoder.pdparams')
    encoder.set_state_dict(encoder_state_dict)
    classifier_state_dict = paddle.load(save_path + 'classifier.pdparams')
    classifier.set_state_dict(classifier_state_dict)

    # Switch to inference mode (disables dropout etc.).
    encoder.eval()
    classifier.eval()

    auc = paddle.metric.Auc()
    for features_T, y_T in tqdm(Target_eval_loader()):
        features_T, y_T = paddle.cast(features_T, dtype='float32'), paddle.cast(y_T, dtype='float32')
        encoded_T = encoder(features_T)
        clf_T = classifier(encoded_T)
        # Paddle's Auc metric expects per-sample [P(neg), P(pos)] columns.
        pred_T = np.concatenate((1 - clf_T.numpy(), clf_T.numpy()), axis=1)
        y_T = paddle.reshape(y_T, [-1, 1]).numpy()
        auc.update(preds=pred_T, labels=y_T)
    auc_value = auc.accumulate()
    print("AUC:{}".format(auc_value))
if __name__ == '__main__':
    # CLI entry point: positional reference/target cities, optional epoch.
    parser = argparse.ArgumentParser(description="evaluate c-watcher on target city.")
    parser.add_argument("reference_city", type=str)
    parser.add_argument("target_city", type=str)
    # Default must be an int: argparse applies `type` only to command-line
    # strings, so the old default="100" left epoch_num as a str when -e was
    # omitted.
    parser.add_argument("-e", "--epoch_num", type=int, default=100)
    # NOTE(review): --other_params is parsed but never used below.
    parser.add_argument("-p", "--other_params", type=str, nargs="*")
    args = parser.parse_args()
    # Model classes are resolved by name, e.g. "EncoderShenzhen"; only safe
    # because reference_city is operator-supplied, not untrusted input.
    evaluate(reference_city=args.reference_city,
             target_city=args.target_city,
             Encoder=eval("Encoder" + args.reference_city),
             Classifier=eval("Classifier" + args.reference_city),
             epoch_num=args.epoch_num
             )
from typing import List, Dict
import matplotlib.pyplot as plt
from datetime import datetime
from pandas import DataFrame
from kaori.plugins.gacha.engine.core import Card as GameCard, RarityName
import seaborn as sns
_dist = Dict[RarityName, int]
def get_rarity_dist(cards: List[GameCard]) -> _dist:
    """Count how many of `cards` fall into each rarity tier.

    Every rarity appears in the result, including tiers with zero cards.
    """
    counts = {rarity: 0 for rarity in RarityName}
    for card in cards:
        counts[card.rarity] += 1
    return counts
def rarity_histogram(dist: _dist, ts=None):
    """Render a bar chart of rarity counts and return the matplotlib figure.

    `ts` (used only for the title date) defaults to the time of the *call*.
    The previous default, `ts=datetime.now()` in the signature, was
    evaluated once at import time, so every later call was stamped with
    the process start time.
    """
    if ts is None:
        ts = datetime.now()
    df = DataFrame([[rarity, count] for rarity, count in dist.items()], columns=['rarity', 'count'])
    fig, ax = plt.subplots()
    bar = sns.barplot(x='rarity', y='count', data=df, ax=ax)
    bar.set_title(f"Rarity Counts ({ts.strftime('%Y-%m-%d')})")
    return bar.get_figure()
# ---- StarcoderdataPython file boundary ----
# File: custom_graphVPR/open3d_semantic-mesh-inspection.py
import numpy as np
import matplotlib.pyplot as plt
import open3d as o3d #version: 0.10.0.0
kimera_ros_path = "../kimera_semantics_ros/"
mesh_prefix = "../kimera_semantics_ros/graphVPR_mesh_results/tesse_shubodh_Inspiron_15_7000_Gaming_"
def read_cfg_csv(filename):
    """Parse a label-config CSV whose rows are `category,red,green,blue,alpha,id`.

    Returns (mapping, count): mapping is {category: [red, green, blue, alpha, id]}
    with the header row removed, and count is the number of non-empty lines
    read, *including* the header row.
    """
    categories = {}
    non_empty = 0
    with open(filename) as fh:
        for raw in fh:
            row = raw.strip()
            if not row:
                continue
            fields = row.split(',')
            # Columns 1-5 hold red, green, blue, alpha, id (extras ignored).
            category = fields[0]
            categories[category] = [fields[1], fields[2], fields[3], fields[4], fields[5]]
            non_empty += 1
    # Drop the header row ("name,...") so only real categories remain.
    categories.pop('name')
    return categories, non_empty
def fulldict_to_mapping(full_dict):
    '''
    Input: full_dict i.e. full text file in the form of dict.
    Output: dict(key, value) where key is the concatenated "r+g+b" string for
    a colour and value is the *list* of semantic category names sharing it.

    The task here is to extract semantic_category_name (like Books) from the semantic mesh.
    The mesh has unique rgb values corresponding to which there exists a unique semantic_category_name.
    So the simple idea is:
    r + g + b must be unique, so let's first create a dict of key as (r+g+b)
    and value as semantic_category_name from full_dict aka full text file. This is what
    we're doing in this function.
    Then, given the semantic mesh's rgb, we can easily extract its semantic_category_name,
    doing ths in function `extract_mesh_labels()`.
    '''
    dict_map = {}
    for key, val in full_dict.items():
        r_, g_, b_, alp, id = val
        rgb = r_ + g_ + b_
        # Don't get confused: key of input is value of output
        dict_map.setdefault(rgb, []).append(key)
    # The mesh contains RGB 255,255,255 which is absent from the config file;
    # register it under a placeholder category.  It is stored as a *list* so
    # every entry has the same shape: the old code stored the bare string
    # 'dummy', which made extract_mesh_labels count len('dummy') == 5
    # instances for this colour instead of 1.
    dict_map['255255255'] = ['dummy']
    print(f"\nWarning: Added `dummy` category manually. Might face issues later, keep this in mind for future tasks. \n")
    return dict_map
def extract_mesh_labels(semantic_pcd_filename, rgb_to_cat):
    """Map each unique mesh colour to its semantic category names.

    Returns (labels, dict_labels, num_of_instances): `labels` is a list of
    category-name lists (one per unique colour), `dict_labels` maps the
    "r+g+b" key to those lists, and `num_of_instances` totals the category
    names across all colours.
    """
    pcd = o3d.io.read_point_cloud(semantic_pcd_filename)
    colours = np.asarray(pcd.colors)
    unique_colours, _ = np.unique(colours * 255, axis=0, return_index=True)

    labels = []
    dict_labels = {}
    for row in unique_colours:
        # Same "r+g+b" string key scheme used when building rgb_to_cat.
        key = str(int(row[0])) + str(int(row[1])) + str(int(row[2]))
        names = rgb_to_cat[key]
        dict_labels.setdefault(key, []).append(names)
        labels.append(names)

    num_of_instances = sum(len(names) for names in labels)
    return labels, dict_labels, num_of_instances
def pcd_info(filename, viz=False):
    """Return the number of distinct semantic colours in the point cloud.

    When `viz` is truthy the full cloud is displayed first.
    """
    pcd = o3d.io.read_point_cloud(filename)
    # Idiomatic truthiness test instead of the old `viz == True` comparison.
    if viz:
        print(f"\n\nSHOWING FULL PCD:\n\n")
        o3d.visualization.draw_geometries([pcd])
    pcd_colors = np.asarray(pcd.colors)
    uniq_clr, indices = np.unique(pcd_colors * 255, axis=0, return_index=True)
    return uniq_clr.shape[0]  # Number of unique semantic categories in mesh
def pcd_show_cat(filename, cat, viz=False):
    '''
    Input: Filename along with category (r+g+b)
    Output: Segmented out pcd + visualization of output if viz=True
    '''
    # NOTE(review): the colour id is the string concatenation of r, g, b, so
    # distinct colours can collide (e.g. (1,23,x) vs (12,3,x)); the float()
    # comparison below is exact only while the concatenated digits stay
    # below 2**53 — confirm both are acceptable for this label set.
    pcd = o3d.io.read_point_cloud(filename)
    pcd_points = np.asarray(pcd.points)
    pcd_colors = np.asarray(pcd.colors)
    pcd_colors = pcd_colors*255
    # Build one numeric id per point from its (r, g, b) digits.
    pcd_colors_ids = np.zeros(pcd_colors.shape[0])
    for i in range(pcd_colors.shape[0]):
        pcd_colors_ids[i] = str(int(pcd_colors[i,0]))+ str(int(pcd_colors[i,1]))+ str(int(pcd_colors[i,2]))

    # Select only the points whose colour id matches the requested category.
    pcd_colors_ids_cat = np.argwhere(pcd_colors_ids==float(cat))
    pcd_points_cat = np.squeeze(pcd_points[pcd_colors_ids_cat], axis=1)
    pcd_colors_cat = np.squeeze(pcd_colors[pcd_colors_ids_cat], axis=1)

    pcd_output = o3d.geometry.PointCloud()
    pcd_output.points = o3d.utility.Vector3dVector(pcd_points_cat)
    #pcd_output.colors = o3d.utility.Vector3dVector(pcd_colors_cat) #TODO: Doesn't seem to work in all cases.. Want to show it as per original colors

    if viz == True:
        #print(f"\n\nSHOWING FULL PCD:\n\n")
        #o3d.visualization.draw_geometries([pcd])
        print(f"\n\nSHOWING **SEMANTIC** SEGMENTED PCD with R+G+B ID - {str(cat)}:\n\n")
        o3d.visualization.draw_geometries([pcd_output])
    return pcd_output
def dbscan_clustering(pcd):
    """Cluster `pcd` with DBSCAN, recolour each cluster, and display the result.

    eps=0.28 / min_points=50 were chosen by hand for this scene (see the TODO at
    the bottom of the file). Modifies `pcd.colors` in place and blocks on the
    Open3D viewer window.
    """
    with o3d.utility.VerbosityContextManager(
            o3d.utility.VerbosityLevel.Debug) as cm:
        labels = np.array(
            pcd.cluster_dbscan(eps=0.28, min_points=50, print_progress=True))
    max_label = labels.max()
    print(f"point cloud has {max_label + 1} clusters")
    # Map cluster id -> colormap entry; guard against division by zero when
    # everything lands in cluster 0.
    colors = plt.get_cmap("tab20")(labels / (max_label if max_label > 0 else 1))
    colors[labels < 0] = 0  # DBSCAN noise points (label -1) are painted black
    pcd.colors = o3d.utility.Vector3dVector(colors[:, :3])
    print(f"\n\nSHOWING **INSTANCE** SEGMENTED PCD \n\n")
    o3d.visualization.draw_geometries([pcd])
def print_general(num_sem_cat_mesh, rgb_to_cat, num_instances, num_lines):
    """Print a summary comparing estimated (mesh) vs ground-truth (csv) counts.

    num_sem_cat_mesh -- unique colours found in the mesh (from pcd_info)
    rgb_to_cat       -- colour -> category mapping built from the csv
    num_instances    -- estimate from extract_mesh_labels (upper bound, see below)
    num_lines        -- number of data lines read from the csv
    """
    print(f"\n\nPRINTING OUT GENERAL DETAILS:\n\n")
    print(f"\nNumber of unique semantic categories in mesh, i.e. ESTIMATED, are {num_sem_cat_mesh}. \n")
    print(f"Number of unique semantic categories in given csv file, i.e. GROUND TRUTH, are {len(rgb_to_cat)}. \n")
    print(f"\nNumber of instance categories in mesh, i.e. ESTIMATED instances*, are {num_instances}. \n")
    print(
        '''
        * Do note that without applying any clustering, there is no way to find num of estimated instances.
        The above 'ESTIMATED instances' just includes all categories with a unique R+G+B value from the txt file.
        That does not mean all those instances were actually found. So it can be thought of as an upper bound value.
        ''')
    print(f"\nNumber of instance categories in given csv file, i.e. TOTAL instances**, are {num_lines + 1}. \n")
    print(f" ** added 1 for manually added dummy category. See fulldict_to_mapping() function.\n")
if __name__ == '__main__':
    # Demo pipeline: load one mesh, compare mesh colours against the csv ground
    # truth, then semantically segment one category and cluster it into instances.
    # `mesh_prefix`, `kimera_ros_path`, `read_cfg_csv` and `fulldict_to_mapping`
    # are defined earlier in this file.
    mesh_name = "26094_3367494325221832115.ply"
    num_sem_cat_mesh = pcd_info(mesh_prefix + mesh_name, viz=True)
    full_dict, num_lines = read_cfg_csv(kimera_ros_path + "cfg/tesse_multiscene_office1_segmentation_mapping.csv")
    rgb_to_cat = fulldict_to_mapping(full_dict)
    _, dict_meshlabels, num_instances = extract_mesh_labels(mesh_prefix + mesh_name, rgb_to_cat)
    print_general(num_sem_cat_mesh, rgb_to_cat, num_instances, num_lines)
    # Hard-coded concatenated R+G+B ids for a few categories of interest.
    Chairs = 1022550; Floor = 124133141; Table = 54176239
    pcd_segmented = pcd_show_cat(mesh_prefix+mesh_name, Chairs, viz=True)
    dbscan_clustering(pcd_segmented)
    #TODO: Just using Open3D's dbscan for clustering with a bit of hyperparameter tuning.
    # However, the remaining task is to do what is suggested exactly in the
    # 3DSceneGraphs paper (1. using PCL library, not Open3D; 2. Euclidean instead of DBSCAN
    # See paper for more details.)
    # and accurately do clustering i.e. going from semantic segmentation to instance segmentation.
| StarcoderdataPython |
1795644 | <reponame>noamshemesh/jasper-milight
import milight
import re
PRIORITY = 10
WORDS = ["LIGHT", "LIGHTS", "ON", "OFF", "DIM", "WHITE", "FIRST", "SECOND", "THIRD", "FOURTH", "ALL"]
template = re.compile(r'.*\b(turn|all|first|second|third|fourth)\b.*\blights\b.*\b(on|off|white|dim)\b.*', re.IGNORECASE)
# Maps the group word captured by `template` (group 1) to the MiLight group
# number; 0 addresses all groups. Keys must exactly match the regex
# alternatives (turn|all|first|second|third|fourth).
words_to_numbers = {
    'turn': 0,
    'all': 0,
    'first': 1,
    'second': 2,
    'third': 3,
    'fourth': 4,  # BUG FIX: was misspelled 'forth', so "fourth ... lights" raised KeyError
}
def isValid(text):
    """Return True when *text* matches the lights-command template."""
    match = template.search(text)
    return match is not None
def message(text, command, group):
    """Build the spoken confirmation for a processed lights command.

    text    -- the original utterance (unused, kept for interface compatibility)
    command -- one of 'on', 'off', 'dim', 'white' (lowercased)
    group   -- captured group word; 'all'/'turn' address every group
    """
    verb = {'dim': 'Dimming ', 'white': 'Setting '}.get(command, 'Turning ')
    target = 'all lights ' if group in ('all', 'turn') else group + ' group '
    if command == 'dim':
        tail = ''
    elif command == 'white':
        tail = 'to white'
    else:
        tail = command
    return verb + target + tail
def handle(text, mic, profile):
    """Parse a recognised lights command and send it to the MiLight bridge.

    text    -- transcribed speech that already matched `template`
    mic     -- Jasper Mic object used for spoken feedback
    profile -- user profile; profile['milight'] must contain 'ip' and may
               contain 'port', 'wait_duration' and 'bulb_type' overrides
    """
    config = profile['milight']
    ip = config['ip']
    # BUG FIX: profile['milight'] is a dict, so the old hasattr(...) checks were
    # always False and user-configured values were silently ignored; dict.get()
    # actually honours the overrides while keeping the same defaults.
    port = config.get('port', 8899)
    wait_duration = config.get('wait_duration', 0.200)
    bulb_type = config.get('bulb_type', ['rgbw'])
    controller = milight.MiLight({'host': ip, 'port': port}, wait_duration=wait_duration)
    light = milight.LightBulb(bulb_type)
    m = template.search(text)
    light_group_string = m.group(1)
    command = m.group(2).lower()
    if not light_group_string:
        light_group_string = 'all'
    light_group_string = light_group_string.lower()
    light_group_int = words_to_numbers[light_group_string]
    try:
        if command == 'dim' or command == 'white':
            # 'white' needs an explicit white command before the brightness one.
            if command == 'white':
                controller.send(light.white(light_group_int))
            controller.send(light.brightness(50 if command == 'dim' else 100, light_group_int))
        else:
            # 'on'/'off' map directly onto LightBulb method names.
            print('sending to controller ' + command + '(' + str(light_group_int) + ')')
            lightCommand = getattr(light, command)
            controller.send(lightCommand(light_group_int))
        mic.say(message(text, command, light_group_string))
    except Exception as e:
        # Boundary handler: report failure to the user instead of crashing Jasper.
        print(e)
        mic.say('Fail to send command to lights')
# tests
#
# class Mic:
# def say(a, b):
# print(b)
#
#
# handle("all lights on", Mic(), {"milight": {"ip": "192.168.1.109"}})
| StarcoderdataPython |
1635050 | from django.shortcuts import render, redirect, HttpResponse
from django.contrib import messages
from .models import User, UserManager
import bcrypt
def admin(request): #GET REQUEST
    """Render the login/registration page with all existing users in context."""
    context = {
        "all_the_users": User.objects.all(),
    }
    return render(request, "login.html", context)
def register(request): #POST REQUEST
    """Validate registration data, create the user with a bcrypt-hashed password,
    store the new id in the session and redirect to /home.

    NOTE(review): validation runs before the method check, so a GET with bad/empty
    data is redirected by the error branch rather than the method branch — confirm
    this ordering is intentional.
    """
    errors = User.objects.register_validator(request.POST)
    if len(errors) > 0:
        for key, value in errors.items():
            messages.error(request, value)
        return redirect("/admin")
    elif request.method != "POST":
        return redirect("/admin")
    elif request.method == "POST":
        password = request.POST['password']
        # bcrypt works on bytes; decode back to str for DB storage.
        pw_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()
        # NOTE(review): "pw_<PASSWORD>" below looks like a dataset redaction artifact;
        # presumably this should read password=pw_hash — confirm against the repo.
        user = User.objects.create(first_name = request.POST["first_name"], last_name = request.POST["last_name"], display_name = request.POST['display_name'], email = request.POST['email'], password=pw_<PASSWORD>)
        request.session['user_id'] = user.id
        return redirect("/home")
def login(request): #POST REQUEST
    """Authenticate a user by email + password.

    Redirects to /home on success; otherwise flashes an error and redirects back
    to /admin. Non-POST requests are redirected to /admin.
    """
    if request.method != "POST":
        return redirect("/admin")
    errors = User.objects.login_validator(request.POST)
    if len(errors) > 0:
        for key, value in errors.items():
            messages.error(request, value)
        return redirect("/admin")
    user = User.objects.filter(email=request.POST["email"])
    if user:
        logged_user = user[0]
        if bcrypt.checkpw(request.POST['password'].encode(), logged_user.password.encode()):
            request.session["user_id"] = logged_user.id
            return redirect("/home")
    # BUG FIX: the original fell off the end and returned None (an invalid Django
    # response) when the email was unknown or the password check failed.
    messages.error(request, "Invalid email or password")
    return redirect("/admin")
def logout(request): #POST REQUEST
    """Clear the whole session (drops user_id) and return to the landing page."""
    request.session.flush()
    return redirect("/")
6975 | <filename>demos/odyssey/dodyssey.py<gh_stars>10-100
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
__version__='3.3.0'
__doc__=''
#REPORTLAB_TEST_SCRIPT
import sys, copy, os
from reportlab.platypus import *
_NEW_PARA=os.environ.get('NEW_PARA','0')[0] in ('y','Y','1')
_REDCAP=int(os.environ.get('REDCAP','0'))
_CALLBACK=os.environ.get('CALLBACK','0')[0] in ('y','Y','1')
if _NEW_PARA:
def Paragraph(s,style):
from rlextra.radxml.para import Paragraph as PPPP
return PPPP(s,style)
from reportlab.lib.units import inch
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
import reportlab.rl_config
reportlab.rl_config.invariant = 1
styles = getSampleStyleSheet()
Title = "The Odyssey"
Author = "Homer"
def myTitlePage(canvas, doc):
canvas.saveState()
canvas.restoreState()
def myLaterPages(canvas, doc):
canvas.saveState()
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "Page %d" % doc.page)
canvas.restoreState()
def go():
    """Lay out the accumulated `Elements` into dodyssey.pdf.

    Sets up three page templates: 'First' (title page), 'OneCol' (chapter
    openings) and 'TwoCol' (body text); flowables switch between them via
    NextPageTemplate entries appended elsewhere in this file.
    """
    def myCanvasMaker(fn,**kw):
        from reportlab.pdfgen.canvas import Canvas
        canv = Canvas(fn,**kw)
        # attach our callback to the canvas
        canv.myOnDrawCB = myOnDrawCB
        return canv

    doc = BaseDocTemplate('dodyssey.pdf',showBoundary=0)
    #normal frame as for SimpleFlowDocument
    frameT = Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height, id='normal')
    #Two Columns
    frame1 = Frame(doc.leftMargin, doc.bottomMargin, doc.width/2-6, doc.height, id='col1')
    frame2 = Frame(doc.leftMargin+doc.width/2+6, doc.bottomMargin, doc.width/2-6,
                   doc.height, id='col2')
    doc.addPageTemplates([PageTemplate(id='First',frames=frameT, onPage=myTitlePage),
                          PageTemplate(id='OneCol',frames=frameT, onPage=myLaterPages),
                          PageTemplate(id='TwoCol',frames=[frame1,frame2], onPage=myLaterPages),
                          ])
    doc.build(Elements,canvasmaker=myCanvasMaker)
Elements = []
ChapterStyle = copy.deepcopy(styles["Heading1"])
ChapterStyle.alignment = TA_CENTER
ChapterStyle.fontsize = 14
InitialStyle = copy.deepcopy(ChapterStyle)
InitialStyle.fontsize = 16
InitialStyle.leading = 20
PreStyle = styles["Code"]
def newPage():
Elements.append(PageBreak())
chNum = 0
def myOnDrawCB(canv,kind,label):
print('myOnDrawCB(%s)'%kind, 'Page number=', canv.getPageNumber(), 'label value=', label)
def chapter(txt, style=ChapterStyle):
    """Append a chapter heading: switch to one-column layout, start a new page,
    emit the heading paragraph, then (optionally) switch back to two columns."""
    global chNum
    Elements.append(NextPageTemplate('OneCol'))
    newPage()
    chNum += 1
    if _NEW_PARA or not _CALLBACK:
        Elements.append(Paragraph(txt, style))
    else:
        # Exercise the onDraw callback path: myOnDrawCB fires when this
        # paragraph is rendered, labelled with the chapter number.
        Elements.append(Paragraph(('foo<onDraw name="myOnDrawCB" label="chap %d"/> '%chNum)+txt, style))
    Elements.append(Spacer(0.2*inch, 0.3*inch))
    if useTwoCol:
        Elements.append(NextPageTemplate('TwoCol'))
def fTitle(txt,style=InitialStyle):
Elements.append(Paragraph(txt, style))
ParaStyle = copy.deepcopy(styles["Normal"])
ParaStyle.spaceBefore = 0.1*inch
if 'right' in sys.argv:
ParaStyle.alignment = TA_RIGHT
elif 'left' in sys.argv:
ParaStyle.alignment = TA_LEFT
elif 'justify' in sys.argv:
ParaStyle.alignment = TA_JUSTIFY
elif 'center' in sys.argv or 'centre' in sys.argv:
ParaStyle.alignment = TA_CENTER
else:
ParaStyle.alignment = TA_JUSTIFY
useTwoCol = 'notwocol' not in sys.argv
def spacer(inches):
Elements.append(Spacer(0.1*inch, inches*inch))
def p(txt, style=ParaStyle):
    """Append a body paragraph, optionally decorating characters when REDCAP is set.

    REDCAP>=1: wrap the first letter in a big red font (drop-cap style test).
    REDCAP>=2: additionally embolden/colour a letter near the middle (long paragraphs).
    REDCAP==3: additionally colour the last letter green.
    These markup insertions exist to exercise the paragraph engine, not for looks.
    """
    if _REDCAP:
        fs, fe = '<font color="red" size="+2">', '</font>'
        n = len(txt)
        # Find the first ASCII letter and wrap it in the red font tags.
        for i in range(n):
            if 'a'<=txt[i]<='z' or 'A'<=txt[i]<='Z':
                txt = (txt[:i]+(fs+txt[i]+fe))+txt[i+1:]
                break
        if _REDCAP>=2 and n>20:
            # Jump past the inserted tags, then to roughly the middle of the
            # original text, and advance to the next letter.
            j = i+len(fs)+len(fe)+1+int((n-1)/2)
            while not ('a'<=txt[j]<='z' or 'A'<=txt[j]<='Z'): j += 1
            txt = (txt[:j]+('<b><i><font size="+2" color="blue">'+txt[j]+'</font></i></b>'))+txt[j+1:]
        if _REDCAP==3 and n>20:
            # Recompute length (tags were inserted above) and decorate the last letter.
            n = len(txt)
            fs = '<font color="green" size="+1">'
            for i in range(n-1,-1,-1):
                if 'a'<=txt[i]<='z' or 'A'<=txt[i]<='Z':
                    txt = txt[:i]+((fs+txt[i]+fe)+txt[i+1:])
                    break
    Elements.append(Paragraph(txt, style))
# Module-level flag: the first preformatted block forces a fresh one-column page.
firstPre = 1

def pre(txt, style=PreStyle):
    """Append a preformatted (code-style) block; the first one switches the
    document back to the one-column template on a new page."""
    global firstPre
    if firstPre:
        Elements.append(NextPageTemplate('OneCol'))
        newPage()
        firstPre = 0
    spacer(0.1)
    p = Preformatted(txt, style)
    Elements.append(p)
def parseOdyssey(fn):
    """Parse the Odyssey text file, build the flowable list, and render the PDF.

    The file is split on 'Book I' and the closing sentence into PREAMBLE, the
    main body L, and POSTAMBLE. Actions are first collected into E as
    [callable, *args] entries, then executed (which appends to the global
    `Elements`), and finally go() builds the document. Timing for each phase is
    printed along the way.
    """
    from time import time
    E = []
    t0=time()
    text = open(fn,'r').read()
    i0 = text.index('Book I')
    endMarker = 'covenant of peace between the two contending parties.'
    i1 = text.index(endMarker)+len(endMarker)
    # Each section becomes a list of stripped lines; blank lines mark paragraph breaks.
    PREAMBLE=list(map(str.strip,text[0:i0].split('\n')))
    L=list(map(str.strip,text[i0:i1].split('\n')))
    POSTAMBLE=list(map(str.strip,text[i1:].split('\n')))

    def ambleText(L):
        # Yield consecutive non-blank runs (paragraphs), consuming L destructively.
        while L and not L[0]: L.pop(0)
        while L:
            T=[]
            while L and L[0]:
                T.append(L.pop(0))
            yield T
            while L and not L[0]: L.pop(0)

    def mainText(L):
        # Yield (book-title, summary-lines, paragraphs) triples, consuming L
        # destructively. A new book starts at a line like "Book XII".
        while L:
            B = L.pop(0)
            while not L[0]: L.pop(0)
            T=[]
            while L and L[0]:
                T.append(L.pop(0))
            while not L[0]: L.pop(0)
            P = []
            while L and not (L[0].startswith('Book ') and len(L[0].split())==2):
                E=[]
                while L and L[0]:
                    E.append(L.pop(0))
                P.append(E)
                if L:
                    while not L[0]: L.pop(0)
            yield B,T,P

    t1 = time()
    print("open(%s,'r').read() took %.4f seconds" %(fn,t1-t0))
    E.append([spacer,2])
    E.append([fTitle,'<font color="red">%s</font>' % Title, InitialStyle])
    E.append([fTitle,'<font size="-4">by</font> <font color="green">%s</font>' % Author, InitialStyle])
    for T in ambleText(PREAMBLE):
        E.append([p,'\n'.join(T)])
    for (B,T,P) in mainText(L):
        E.append([chapter,B])
        E.append([p,'<font size="+1" color="Blue"><b>%s</b></font>' % '\n'.join(T),ParaStyle])
        for x in P:
            E.append([p,' '.join(x)])
    # NOTE(review): this binds a *local* firstPre; it does not reset the
    # module-level flag used by pre(). Harmless here since pre() is never
    # queued below, but presumably `global firstPre` was intended — confirm.
    firstPre = 1
    for T in ambleText(POSTAMBLE):
        E.append([p,'\n'.join(T)])
    t3 = time()
    print("Parsing into memory took %.4f seconds" %(t3-t1))
    del L
    t4 = time()
    print("Deleting list of lines took %.4f seconds" %(t4-t3))
    # Execute the queued actions; each call appends flowables to Elements.
    for i in range(len(E)):
        E[i][0](*E[i][1:])
    t5 = time()
    print("Moving into platypus took %.4f seconds" %(t5-t4))
    del E
    t6 = time()
    print("Deleting list of actions took %.4f seconds" %(t6-t5))
    go()
    t7 = time()
    print("saving to PDF took %.4f seconds" %(t7-t6))
    print("Total run took %.4f seconds"%(t7-t0))
    import hashlib
    # Digest printed so regression runs can diff the output deterministically
    # (reportlab.rl_config.invariant = 1 is set at the top of the file).
    print('file digest: %s' % hashlib.md5(open('dodyssey.pdf','rb').read()).hexdigest())
def run():
    """Process the first Odyssey source file found in the working directory."""
    for fn in ('odyssey.full.txt','odyssey.txt'):
        if os.path.isfile(fn):
            parseOdyssey(fn)
            break
def doProf(profname,func,*args,**kwd):
    """Profile func(*args, **kwd), dump raw stats to *profname*, and print the
    top 20 entries sorted by internal time and call count.

    Returns func's return value.
    """
    # hotshot was removed in Python 3; cProfile/pstats is the supported profiler.
    import cProfile
    import pstats
    prof = cProfile.Profile()
    # BUG FIX: the original called prof.runcall(func) and silently dropped
    # *args/**kwd, so any profiled function needing arguments failed.
    result = prof.runcall(func, *args, **kwd)
    prof.dump_stats(profname)
    stats = pstats.Stats(profname)
    stats.strip_dirs()
    stats.sort_stats('time', 'calls')
    stats.print_stats(20)
    return result
if __name__=='__main__':
if '--prof' in sys.argv:
doProf('dodyssey.prof',run)
else:
run()
| StarcoderdataPython |
3244582 | """
This is an example of a python module that is written to be both
a set of tests and a jupyter notebook source,
reusing the tests as API usage examples.
"""
# let's start with something simple: making sure we will see all graphics inline
# (the nice thing, btw, is that an empty line will split a text block, and an empty comment a code block
# --- but if they are attached, they are kept together. An empty line will *not* split a code block, though
# % jsroot on
#
# % matplotlib inline
# and also the imports that we'll need to use ROOT, matplotlib, and the helper scripts
import ROOT
import matplotlib.pyplot as plt
import mplbplot.decorateAxes
# Next, let's construct a simple ROOT histogram to play with
h1 = ROOT.TH1F("h1", "", 10, -5., 5.)
h1.FillRandom("gaus", 250)
# First plot it simply like a histogram with ROOT
c1 = ROOT.TCanvas("c1")
h1.Draw("HIST")
c1.Draw()
# Next, same thing with mplbplot
fig,ax = plt.subplots(num="c1")
ax.rhist(h1, color="k")
# THE END for now
| StarcoderdataPython |
3336747 | """Online matching net -- an online version of the nearest neighbor algorithm.
Author: <NAME> (<EMAIL>)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import tensorflow as tf
from fewshot.models.modules.example_memory import ExampleMemory
from fewshot.models.registry import RegisterModule
INF = 1e6
@RegisterModule('online_matchingnet_memory')
@RegisterModule('matchingnet_memory')  # Legacy name
class OnlineMatchingNetMemory(ExampleMemory):
  """Example memory with a matching-net (soft nearest-neighbor) readout."""

  def compute_cosine_sim(self, a, b):
    """Computes cosine similarity between two batches of vector sets.

    Args:
      a: [B, K1, D] embeddings.
      b: [B, K2, D] embeddings.

    Returns:
      [B, K1, K2] pairwise cosine similarities.
    """
    ab = tf.matmul(a, b, transpose_b=True)  # [B, K1, K2]
    # Clamp norms at 1e-7 to avoid division by zero for all-zero vectors.
    anorm = tf.maximum(tf.sqrt(tf.reduce_sum(a**2, [-1])), 1e-7)  # [B, K1]
    bnorm = tf.maximum(tf.sqrt(tf.reduce_sum(b**2, [-1])), 1e-7)  # [B, K2]
    return ab / tf.expand_dims(anorm, 2) / tf.expand_dims(bnorm, 1)

  def infer(self, x, t, storage, label):
    """Infer cluster ID. Either goes into one of the existing cluster
    or become a new cluster. This procedure is for prediction purpose.

    Args:
      x: Input. [B, D]
      t: Scalar tensor; number of valid stored examples in `storage`/`label`.
      storage: Stored example embeddings. [B, T, D]
      label: Stored example class labels. [B, T]

    Returns:
      logits: Log class probabilities over known classes + unknown. [B, K+1]
      new: Score (not a probability) for starting a new cluster. [B]
    """
    B = x.shape[0]
    K = self.max_classes
    if tf.equal(t, 0):
      # Empty memory: uniform-zero logits and a large "new cluster" score.
      # NOTE(review): `if` on a tensor predicate assumes eager execution — confirm.
      return tf.zeros([B, K + 1],
                      dtype=self.dtype), tf.zeros([B], dtype=self.dtype) + INF
    storage_ = storage[:, :t, :]
    label_ = label[:, :t]
    x_ = tf.expand_dims(x, 1)  # [B, 1, D]
    if self._similarity == "cosine":
      logits = tf.squeeze(self.compute_cosine_sim(x_, storage_), 1)  # [B, M]
      # 7.5: softmax temperature for cosine similarities — presumably tuned; confirm.
      kprob = tf.nn.softmax(logits * 7.5)  # [B, M]
    elif self._similarity == "euclidean":
      logits = -tf.squeeze(self.compute_euclidean_dist_sq(x_, storage_),
                           1)  # [B, M]
      kprob = tf.nn.softmax(logits)
    max_logits = tf.reduce_max(logits, [-1])  # [B]
    clabel_onehot = tf.one_hot(label_, self.unknown_id + 1)  # [B, M, C]
    # Soft vote: sum per-example softmax weights into class bins.
    # [B, M, 1] * [B, M, C] = [B, C]
    cprob = tf.reduce_sum(tf.expand_dims(kprob, -1) * clabel_onehot, [1])
    cprob = tf.maximum(cprob, 1e-6)  # Delta.
    cprob.set_shape([B, K + 1])
    # New-cluster score grows as the best match falls below threshold beta.
    new = (self._beta - max_logits) / self._gamma  # [B]
    # remain = (max_logits - self._beta) / self._gamma  # [B]
    return tf.math.log(cprob), new
| StarcoderdataPython |
3255423 | #!/usr/bin/python
#----------------------------------------------------------------------
# Be sure to add the python path that points to the LLDB shared library.
#
# # To use this in the embedded python interpreter using "lldb" just
# import it with the full path using the "command script import"
# command
# (lldb) command script import /path/to/cmdtemplate.py
#----------------------------------------------------------------------
import lldb
import commands
import optparse
import shlex
def create_framestats_options():
    """Build the optparse parser for the 'framestats' LLDB command."""
    usage = "usage: %prog [options]"
    description='''This command is meant to be an example of how to make an LLDB command that
does something useful, follows best practices, and exploits the SB API.
Specifically, this command computes the aggregate and average size of the variables in the current frame
and allows you to tweak exactly which variables are to be accounted in the computation.
'''
    parser = optparse.OptionParser(description=description, prog='framestats',usage=usage)
    # All four options are boolean toggles that default to off; register them
    # from a table so flag/dest/help stay visibly in sync.
    flag_specs = [
        ('-i', '--in-scope', 'inscope', 'in_scope_only = True'),
        ('-a', '--arguments', 'arguments', 'arguments = True'),
        ('-l', '--locals', 'locals', 'locals = True'),
        ('-s', '--statics', 'statics', 'statics = True'),
    ]
    for short_flag, long_flag, dest, help_text in flag_specs:
        parser.add_option(short_flag, long_flag, action='store_true',
                          dest=dest, help=help_text, default=False)
    return parser
def the_framestats_command(debugger, command, result, dict):
    """LLDB command entry point: report count, total and average byte size of
    the selected frame's variables, filtered per the parsed options.

    NOTE: this file is Python 2 (``print >> result`` statements below); it runs
    inside LLDB's embedded interpreter, not standalone.
    """
    # Use the Shell Lexer to properly parse up command options just like a
    # shell would
    command_args = shlex.split(command)
    parser = create_framestats_options()
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        # Intentionally bare: optparse signals bad arguments via SystemExit,
        # which would otherwise terminate LLDB itself.
        # if you don't handle exceptions, passing an incorrect argument to the OptionParser will cause LLDB to exit
        # (courtesy of OptParse dealing with argument errors by throwing SystemExit)
        result.SetError ("option parsing failed")
        return
    # in a command - the lldb.* convenience variables are not to be used
    # and their values (if any) are undefined
    # this is the best practice to access those objects from within a command
    target = debugger.GetSelectedTarget()
    process = target.GetProcess()
    thread = process.GetSelectedThread()
    frame = thread.GetSelectedFrame()
    if not frame.IsValid():
        return "no frame here"
    # from now on, replace lldb.<thing>.whatever with <thing>.whatever
    variables_list = frame.GetVariables(options.arguments, options.locals, options.statics, options.inscope)
    variables_count = variables_list.GetSize()
    if variables_count == 0:
        print >> result, "no variables here"
        return
    # Sum the static byte size of each variable's type.
    total_size = 0
    for i in range(0,variables_count):
        variable = variables_list.GetValueAtIndex(i)
        variable_type = variable.GetType()
        total_size = total_size + variable_type.GetByteSize()
    average_size = float(total_size) / variables_count
    print >>result, "Your frame has %d variables. Their total size is %d bytes. The average size is %f bytes" % (variables_count,total_size,average_size)
    # not returning anything is akin to returning success
# not returning anything is akin to returning success
def __lldb_init_module (debugger, dict):
    """Called by LLDB when this module is imported via 'command script import';
    registers the 'framestats' command. (Python 2 syntax — LLDB embedded.)"""
    # This initializer is being run from LLDB in the embedded command interpreter
    # Make the options so we can generate the help text for the new LLDB
    # command line command prior to registering it with LLDB below
    parser = create_framestats_options()
    the_framestats_command.__doc__ = parser.format_help()
    # Add any commands contained in this module to LLDB
    debugger.HandleCommand('command script add -f cmdtemplate.the_framestats_command framestats')
    print 'The "framestats" command has been installed, type "help framestats" or "framestats --help" for detailed help.'
| StarcoderdataPython |
3294535 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
    """Enum metaclass that makes member lookup by name case-insensitive,
    for both Enum['name'] indexing and Enum.name attribute access."""

    def __getitem__(self, name):
        # Enum['value'] and Enum['VALUE'] resolve to the same member.
        return super().__getitem__(name.upper())

    def __getattr__(cls, name):
        """Return the enum member matching `name`
        We use __getattr__ instead of descriptors or inserting into the enum
        class' __dict__ in order to support `name` and `value` being both
        properties for enum members (which live in the class' __dict__) and
        enum members themselves.
        """
        try:
            return cls._member_map_[name.upper()]
        except KeyError:
            # Re-raise as AttributeError so getattr()/hasattr() semantics hold.
            raise AttributeError(name)
class AccessRights(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The permissions assigned to the shared access policy.
"""
REGISTRY_READ = "RegistryRead"
REGISTRY_WRITE = "RegistryWrite"
SERVICE_CONNECT = "ServiceConnect"
DEVICE_CONNECT = "DeviceConnect"
REGISTRY_READ_REGISTRY_WRITE = "RegistryRead, RegistryWrite"
REGISTRY_READ_SERVICE_CONNECT = "RegistryRead, ServiceConnect"
REGISTRY_READ_DEVICE_CONNECT = "RegistryRead, DeviceConnect"
REGISTRY_WRITE_SERVICE_CONNECT = "RegistryWrite, ServiceConnect"
REGISTRY_WRITE_DEVICE_CONNECT = "RegistryWrite, DeviceConnect"
SERVICE_CONNECT_DEVICE_CONNECT = "ServiceConnect, DeviceConnect"
REGISTRY_READ_REGISTRY_WRITE_SERVICE_CONNECT = "RegistryRead, RegistryWrite, ServiceConnect"
REGISTRY_READ_REGISTRY_WRITE_DEVICE_CONNECT = "RegistryRead, RegistryWrite, DeviceConnect"
REGISTRY_READ_SERVICE_CONNECT_DEVICE_CONNECT = "RegistryRead, ServiceConnect, DeviceConnect"
REGISTRY_WRITE_SERVICE_CONNECT_DEVICE_CONNECT = "RegistryWrite, ServiceConnect, DeviceConnect"
REGISTRY_READ_REGISTRY_WRITE_SERVICE_CONNECT_DEVICE_CONNECT = "RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect"
class Capabilities(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The capabilities and features enabled for the IoT hub.
"""
NONE = "None"
DEVICE_MANAGEMENT = "DeviceManagement"
class EndpointHealthStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Health statuses have following meanings. The 'healthy' status shows that the endpoint is
accepting messages as expected. The 'unhealthy' status shows that the endpoint is not accepting
messages as expected and IoT Hub is retrying to send data to this endpoint. The status of an
unhealthy endpoint will be updated to healthy when IoT Hub has established an eventually
consistent state of health. The 'dead' status shows that the endpoint is not accepting
messages, after IoT Hub retried sending messages for the retrial period. See IoT Hub metrics to
identify errors and monitor issues with endpoints. The 'unknown' status shows that the IoT Hub
has not established a connection with the endpoint. No messages have been delivered to or
rejected from this endpoint
"""
UNKNOWN = "unknown"
HEALTHY = "healthy"
UNHEALTHY = "unhealthy"
DEAD = "dead"
class IotHubNameUnavailabilityReason(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The reason for unavailability.
"""
INVALID = "Invalid"
ALREADY_EXISTS = "AlreadyExists"
class IotHubReplicaRoleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Specific Role assigned to this location
"""
PRIMARY = "primary"
SECONDARY = "secondary"
class IotHubScaleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of the scaling enabled.
"""
AUTOMATIC = "Automatic"
MANUAL = "Manual"
NONE = "None"
class IotHubSku(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The name of the SKU.
"""
F1 = "F1"
S1 = "S1"
S2 = "S2"
S3 = "S3"
B1 = "B1"
B2 = "B2"
B3 = "B3"
class IotHubSkuTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The billing tier for the IoT hub.
"""
FREE = "Free"
STANDARD = "Standard"
BASIC = "Basic"
class IpFilterActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The desired action for requests captured by this rule.
"""
ACCEPT = "Accept"
REJECT = "Reject"
class JobStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The status of the job.
"""
UNKNOWN = "unknown"
ENQUEUED = "enqueued"
RUNNING = "running"
COMPLETED = "completed"
FAILED = "failed"
CANCELLED = "cancelled"
class JobType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of the job.
"""
UNKNOWN = "unknown"
EXPORT = "export"
IMPORT_ENUM = "import"
BACKUP = "backup"
READ_DEVICE_PROPERTIES = "readDeviceProperties"
WRITE_DEVICE_PROPERTIES = "writeDeviceProperties"
UPDATE_DEVICE_CONFIGURATION = "updateDeviceConfiguration"
REBOOT_DEVICE = "rebootDevice"
FACTORY_RESET_DEVICE = "factoryResetDevice"
FIRMWARE_UPDATE = "firmwareUpdate"
class RouteErrorSeverity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Severity of the route error
"""
ERROR = "error"
WARNING = "warning"
class RoutingSource(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The source that the routing rule is to be applied to, such as DeviceMessages.
"""
INVALID = "Invalid"
DEVICE_MESSAGES = "DeviceMessages"
TWIN_CHANGE_EVENTS = "TwinChangeEvents"
DEVICE_LIFECYCLE_EVENTS = "DeviceLifecycleEvents"
DEVICE_JOB_LIFECYCLE_EVENTS = "DeviceJobLifecycleEvents"
DIGITAL_TWIN_CHANGE_EVENTS = "DigitalTwinChangeEvents"
class RoutingStorageContainerPropertiesEncoding(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Encoding that is used to serialize messages to blobs. Supported values are 'avro',
'avrodeflate', and 'JSON'. Default value is 'avro'.
"""
AVRO = "Avro"
AVRO_DEFLATE = "AvroDeflate"
JSON = "JSON"
class TestResultStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Result of testing route
"""
UNDEFINED = "undefined"
FALSE = "false"
TRUE = "true"
| StarcoderdataPython |
3238646 | # Copyright 2016 Ifwe Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Signalfx steps for feature tests."""
from behave import given, then
from .model_steps import parse_properties
from ..environment import add_config_val
import ast
@given(u'signalfx notifications are enabled')
def given_signalfx_notifications_are_enabled(context):
add_config_val(context, 'notifications',
dict(enabled_methods=['signalfx']),
use_list=True)
@then(u'there was a signalfx notification with {properties}')
def then_there_is_a_signalfx_notification_with(context, properties):
    """Assert at least one captured SignalFx request carries all the given
    top-level key=value properties (parsed from the feature step text)."""
    notifications = context.signalfx_server.get_notifications()
    assert notifications is not None
    assert len(notifications) > 0
    attrs = parse_properties(properties)
    try:
        # NOTE(review): str(req).strip('[]') + literal_eval round-trips the
        # captured request body into a dict; presumes the fake server stores a
        # one-element list whose repr is eval-able — confirm against the fixture.
        assert any(all(ast.literal_eval(str(req).strip('[]'))[attr] == attrs[attr] for attr in attrs)
                   for (req, _resp) in notifications)
    except KeyError as e:
        # A missing key means the notification lacked the property entirely.
        assert False, e
@then(u'a signalfx notification message contains dimensions {properties}')
def then_there_is_a_signalfx_notification_with_message_that_contains_dimensions(context, properties):
notifications = context.signalfx_server.get_notifications()
assert notifications is not None
assert len(notifications) > 0
attrs = parse_properties(properties)
try:
assert any(all(ast.literal_eval(str(req).strip('[]'))['dimensions'][attr] == attrs[attr] for attr in attrs)
for (req, _resp) in notifications)
except KeyError as e:
assert False, e
@then(u'a signalfx notification message contains properties {properties}')
def then_there_is_a_signalfx_notification_with_message_that_contains_properties(context, properties):
notifications = context.signalfx_server.get_notifications()
assert notifications is not None
assert len(notifications) > 0
attrs = parse_properties(properties)
try:
assert any(all(ast.literal_eval(str(req).strip('[]'))['properties'][attr] == attrs[attr] for attr in attrs)
for (req, _resp) in notifications)
except KeyError as e:
assert False, e
@then(u'there are {number} signalfx notifications')
def then_there_are_signalfx_notifications(context, number):
    """Assert the fake SignalFx server captured exactly `number` notifications."""
    number = int(number)  # step text arrives as a string
    notifications = context.signalfx_server.get_notifications()
    assert notifications is not None
    assert len(notifications) == number
@then(u'there is a signalfx failure')
def then_there_is_a_signalfx_failure(context):
notifications = context.signalfx_server.get_notifications()
assert notifications is not None
assert any(resp != 200 for (_req, resp) in notifications)
| StarcoderdataPython |
3264014 | <filename>geco/mips/set_cover/sun.py
from networkx.utils import py_random_state
from geco.mips.set_cover.generic import set_cover
def _sun_costs(n, seed):
    # Element costs drawn i.i.d. uniformly from {1, ..., 100};
    # `seed` is a random.Random-like state (resolved by py_random_state upstream).
    return [seed.randint(1, 100) for _ in range(n)]
def _sun_sets(n, m, seed, initial_sets=None):
if not initial_sets:
sets = [set() for _ in range(m)]
else:
sets = list(initial_sets)
p = 0.05
for e in range(n):
# enforce element to appear in at least 2 sets
for s in (sets[i] for i in seed.sample(range(m), k=2)):
s.add(e)
# add element to set with probability p
for s in sets:
if seed.random() < p:
s.add(e)
return sets
@py_random_state(-1)
def sun_instance(n, m, seed=0):
"""
Generates instance for set cover generation as described in [1].
Parameters
----------
n: int
Number of elements
m: int
Number of set constraints
seed: integer, random_state, or None
Indicator of random number generation state
Returns
-------
model: scip.Model
A pyscipopt model of the generated instance
References
----------
.. [1] <NAME>, <NAME>, <NAME>, & <NAME> (2021).
Improving Learning to Branch via Reinforcement Learning. In Submitted to
International Conference on Learning
"""
return set_cover(*sun_params(n, m, seed))
@py_random_state(-1)
def sun_params(n, m, seed=0):
"""
Generates instance params for set cover generation as described in [1].
Parameters
----------
n: int
Number of elements
m: int
Number of set constraints
seed: integer, random_state, or None
Indicator of random number generation state
Returns
-------
costs: list[int]
Element costs in objective function
sets: list[set]
Definition of element requirement for each set
References
----------
.. [1] <NAME>, <NAME>, <NAME>, & <NAME> (2021).
Improving Learning to Branch via Reinforcement Learning. In Submitted to
International Conference on Learning
"""
return _sun_costs(n, seed), _sun_sets(n, m, seed, initial_sets=None)
@py_random_state(-1)
def expand_sun_params(new_params, base_result, seed=0):
"""
Implements the expansion from an existing set cover instance as described in [1].
Parameters
----------
new_params: tuple
New params for sun_params
base_result: tuple
Tuple of (costs, sets) that represent instance params of backbone
seed: integer, random_state, or None
Indicator of random number generation state
Returns
-------
costs: list[int]
Element costs in objective function
sets: list[set]
Definition of element requirement for each set
References
__________
.. [1] <NAME>, <NAME>, <NAME>, & <NAME> (2021).
Improving Learning to Branch via Reinforcement Learning. In Submitted to
International Conference on Learning
"""
n, *_ = new_params
base_costs, base_sets = base_result
assert n > len(base_costs)
costs = list(base_costs)
costs += _sun_costs(n - len(base_costs), seed)
return costs, _sun_sets(n, len(base_sets), seed, initial_sets=base_sets)
| StarcoderdataPython |
1794217 | <filename>tests/project_requirements_test.py<gh_stars>0
import unittest
import os, sys
test_dir = os.path.dirname(__file__)
src_dir = "../"
sys.path.insert(0, os.path.abspath(os.path.join(test_dir, src_dir)))
from bank.accounts import Accounts, CheckingAccount, SavingsAccount
from bank.account_holder import AccountHolder
from bank.banks import Bank
from bank.cards import Card
from bank.exceptions import InsufficientBalance, AccountError, ExceedsLimit
# Shared fixtures: one bank with two account holders; the ordered tests in
# BasicTests below read and mutate these balances.
bank = Bank()
cormo = AccountHolder(bank, "101", "Mathias", "Cormann")
# Checking account for holder "101", seeded with $1000.
cormo_checking = CheckingAccount(
    "101-checking-1", "checking", cormo.accounts, "101", opening_balance=1000.00
)
# Card numbers appear to follow "<card prefix>|<account id>" (see test_b_more_ahs).
cormo_checking_card = Card(
    cormo,
    cormo_checking,
    "Mathias",
    "Cormann",
    "40001|101-checking-1",
    "0101",
    "12-12-2024",
    "432",
    "active",
)
frydy = AccountHolder(bank, "202", "Josh", "Frydenberg")
# Savings account for holder "202", seeded with $0.25.
frydy_savings = SavingsAccount(
    "202-savings-1", "savings", frydy.accounts, "202", opening_balance=0.25
)
# NOTE(review): this card number embeds "101-savings-1" although the account id
# is "202-savings-1" -- looks like a copy-paste slip; confirm against how Bank
# resolves card numbers to accounts before changing it.
frydy_savings_card = Card(
    frydy,
    frydy_savings,
    "Josh",
    "Frydenberg",
    "50001|101-savings-1",
    "4321",
    "12-12-2024",
    "342",
    "active",
)
# Import-time sanity check: the bank ledger equals the sum of account balances.
assert bank.bank_balance == cormo_checking.balance + frydy_savings.balance
class BasicTests(unittest.TestCase):
    """
    Requirements:
    ● Deposit, withdraw and maintain a balance for multiple customers.
    ● Return a customer’s balance and the bank’s total balance.
    ● Prevent customers from withdrawing more money than they have in their account.

    Note: the tests use the shared module-level fixtures and rely on
    unittest's alphabetical execution order (test_a_ .. test_f_), so each
    test sees the balances left behind by the previous one.
    """

    def test_a_withdraw_deposit(self):
        # ● Deposit, withdraw and maintain a balance for multiple customers.
        trans = bank.deposit_transaction(cormo_checking_card.card_number, 500.00)
        trans2 = bank.deposit_transaction(frydy_savings_card.card_number, 500.00)
        # Ledger must equal the sum of account balances after the deposits...
        assert bank.bank_balance == cormo_checking.balance + frydy_savings.balance
        trans3 = bank.withdrawal_transaction(cormo_checking_card.card_number, 500.00)
        trans4 = bank.withdrawal_transaction(frydy_savings_card.card_number, 500.00)
        # ...and after the matching withdrawals.
        assert bank.bank_balance == cormo_checking.balance + frydy_savings.balance

    def test_b_more_ahs(self):
        # Create 10K accounts, process withdrawals, check balances.
        # A local Bank deliberately shadows the module fixture so the shared
        # balances are untouched.
        bank = Bank()
        n = 10000
        for i in range(n):
            ah = AccountHolder(bank, f"{i}1", "Mathias", "Cormann")
            ac = CheckingAccount(
                f"{i}1-checking-1",
                "checking",
                ah.accounts,
                f"{i}1",
                opening_balance=1000.00,
            )
            c = Card(
                ah,
                ac,
                "Mathias",
                "Cormann",
                f"40001|{i}1-checking-1",
                "0101",
                "12-12-2024",
                "432",
                "active",
            )
        print(bank.bank_balance, len(bank.account_holders))
        assert bank.bank_balance == 1000 * n
        # Withdraw half of every opening balance and re-check the ledger.
        for id, acchldr in bank.account_holders.items():
            bank.withdrawal_transaction(f"40001|{id}-checking-1", 500.00)
        assert bank.bank_balance == 500 * n

    def test_c_balance(self):
        # ● Return a customer’s balance and the bank’s total balance.
        trans = bank.deposit_transaction(cormo_checking_card.card_number, 500.00)
        trans2 = bank.deposit_transaction(cormo_checking_card.card_number, 500.00)
        assert bank.bank_balance == cormo_checking.balance + frydy_savings.balance
        print(f"{bank.institution} balance: ${bank.bank_balance}")
        print(f"{cormo_checking.account_id} balance: ${cormo_checking.balance}")
        print(f"{frydy_savings.account_id} balance: ${frydy_savings.balance}")

    def test_d_overdraw(self):
        # ● Prevent customers from withdrawing more money than they have in their account.
        # Attempt to withdraw $1 over the current balance.
        previous_balance = cormo_checking.balance
        trans = bank.withdrawal_transaction(
            cormo_checking_card.card_number, cormo_checking.balance + 1.00
        )
        # Status should be falsy for an unsuccessful transaction.
        assert not trans["status"]
        # We also get an explanation (the negative balance shown is on purpose).
        print(trans["error"])
        # There should be no change in balance as the transaction was denied.
        assert previous_balance == cormo_checking.balance

    # Other tests of similar nature.
    def test_e_stat(self):
        # Exceed withdrawal limit e.g. max $5000 can be taken per transaction.
        # Deposit the required amount first.
        bank.deposit_transaction(cormo_checking_card.card_number, 5000.00)
        # The limit exception is handled: trans denied, balance maintained.
        previous_balance = cormo_checking.balance
        trans = bank.withdrawal_transaction(cormo_checking_card.card_number, 5001.00)
        assert not trans["status"]
        print(trans["error"])
        assert previous_balance == cormo_checking.balance

    def test_f_stat(self):
        # Account status: transactions on a locked or closed account are denied.
        cormo_checking.status = "locked"
        trans = bank.withdrawal_transaction(cormo_checking_card.card_number, 500.00)
        # Transaction should be denied.
        assert not trans["status"]
        print(trans["error"])
        # Reopen account.
        cormo_checking.status = "open"
        trans = bank.withdrawal_transaction(cormo_checking_card.card_number, 500.00)
        # We should have a successful transaction.
        assert trans["status"]
# Run the suite when executed directly (tests run in alphabetical order).
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
1794403 | """
<NAME>
<EMAIL>
@shackoverflow
surrender_index_bot.py
A Twitter bot that tracks every live game in the NFL,
and tweets out the "Surrender Index" of every punt
as it happens.
Inspired by SB Nation's <NAME> @jon_bois.
"""
import argparse
from base64 import urlsafe_b64encode
import chromedriver_autoinstaller
from datetime import datetime, timedelta, timezone
from dateutil import parser, tz
from email.mime.text import MIMEText
import espn_scraper as espn
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import json
import numpy as np
import os
import pickle
import scipy.stats as stats
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import StaleElementReferenceException
from subprocess import Popen, PIPE
import sys
import threading
import time
import tweepy
from twilio.rest import Client
import traceback
# A dictionary of plays that have already been tweeted.
tweeted_plays = None
# A dictionary of the currently active games.
games = {}
# The authenticated Tweepy APIs.
api, ninety_api = None, None
# NPArray of historical surrender indices.
historical_surrender_indices = None
# Whether the bot should tweet out any punts
should_tweet = True
### SELENIUM FUNCTIONS ###
def get_game_driver(headless=True):
    """Create a Chrome webdriver; headless unless the debug flags disable it."""
    global debug
    global not_headless
    opts = webdriver.ChromeOptions()
    run_headless = headless and not debug and not not_headless
    if run_headless:
        opts.add_argument("headless")
    return webdriver.Chrome(options=opts)
def get_twitter_driver(link, headless=False):
    """Open `link` in a Chrome session logged into the cancel Twitter account.

    Reads the cancel account's email/username/password from credentials.json,
    opens the tweet, and walks through Twitter's login form. Handles the
    alternate "log in with username" screen Twitter sometimes shows when
    email login is disabled. Returns the logged-in driver.
    """
    with open('credentials.json', 'r') as f:
        credentials = json.load(f)
    email = credentials['cancel_email']
    username = credentials['cancel_username']
    # BUGFIX: this line was a garbled placeholder (`<PASSWORD>[...]`, a
    # syntax error); restore the credentials lookup.
    password = credentials['cancel_password']
    driver = get_game_driver(headless=headless)
    driver.implicitly_wait(60)
    driver.get(link)
    driver.find_element_by_xpath("//div[@aria-label='Reply']").click()
    time.sleep(1)
    login_button = driver.find_element_by_xpath("//a[@data-testid='login']")
    time.sleep(1)
    # Click via JavaScript (a plain .click() is presumably unreliable here).
    driver.execute_script("arguments[0].click();", login_button)
    email_field = driver.find_element_by_xpath(
        "//input[@name='session[username_or_email]']")
    password_field = driver.find_element_by_xpath(
        "//input[@name='session[password]']")
    email_field.send_keys(email)
    password_field.send_keys(password)
    driver.find_element_by_xpath(
        "//div[@data-testid='LoginForm_Login_Button']").click()
    time.sleep(1)
    # Fallback: Twitter rejected email login and wants the username instead.
    if 'email_disabled=true' in driver.current_url:
        username_field = driver.find_element_by_xpath(
            "//input[@name='session[username_or_email]']")
        password_field = driver.find_element_by_xpath(
            "//input[@name='session[password]']")
        username_field.send_keys(username)
        password_field.send_keys(password)
        driver.find_element_by_xpath(
            "//div[@data-testid='LoginForm_Login_Button']").click()
    return driver
def get_inner_html_of_element(element):
return element.get_attribute("innerHTML")
def get_inner_html_of_elements(elements):
    """Return the innerHTML of each WebElement in `elements`, in order."""
    return [get_inner_html_of_element(element) for element in elements]
def construct_play_from_element(element):
    """Parse one ESPN play-list <li> element into a play dict.

    The <h3> title looks like "1st & 10 at SEA 25"; the <span> description
    like "(12:34 - 1st) J.Doe punts ...". Returns a dict with keys
    'yard_line', 'down', 'dist' (only when the title held a real header)
    plus 'time', 'qtr' and 'text'.
    """
    title = get_inner_html_of_element(element.find_element_by_tag_name("h3"))
    desc = get_inner_html_of_element(
        element.find_element_by_tag_name("p").find_element_by_tag_name("span"))
    desc = desc.lstrip().rstrip()
    play = {}
    if len(title) > 5:
        # Split "1st & 10 at SEA 25" into down/distance and yard line.
        down_dist, yrdln = title.split("at")
        play['yard_line'] = yrdln.lstrip(" ")
        play['down'] = down_dist[:3]
        play['dist'] = down_dist.rstrip(" ").split(" ")[-1]
        # "1st & Goal": use the yard line number as the distance.
        if 'goal' in play['dist'].lower():
            play['dist'] = play['yard_line'].split(" ")[1]
    # Clock and quarter live inside the first parenthesized "(MM:SS - QTR)".
    start_index = desc.find("(") + 1
    end_index = desc.find(")")
    time_qtr = desc[start_index:end_index]
    play['time'] = time_qtr.split("-")[0].rstrip(" ")
    play['qtr'] = time_qtr.split("-")[1].lstrip(" ")
    play['text'] = desc[end_index + 1:].lstrip(" ")
    return play
def get_plays_from_drive(drive, game):
    """Return parsed play dicts for the interesting plays of one drive.

    For final games the last three list items are inspected; otherwise the
    first three (ESPN appears to list the newest play first during live
    games -- confirm). Only plain or 'video' list items that parse with a
    yard line are kept.
    """
    all_plays = drive.find_elements_by_tag_name("li")
    good_plays = []
    if is_final(game):
        relevant_plays = all_plays[-3:]
    else:
        relevant_plays = all_plays[:3]
    for play in relevant_plays:
        if play.get_attribute("class") == '' or play.get_attribute(
                "class") == 'video':
            play_dct = construct_play_from_element(play)
            if 'yard_line' in play_dct:
                good_plays.append(play_dct)
    return good_plays
def get_all_drives(game):
    """Return all drive-list elements, expanding collapsed accordions first."""
    all_drives = game.find_elements_by_class_name("drive-list")
    for drive in all_drives:
        # Walk two levels up to the accordion wrapper; the "in" class marks
        # an already-expanded section.
        accordion_content = drive.find_element_by_xpath(
            '..').find_element_by_xpath('..')
        if "in" not in accordion_content.get_attribute("class"):
            # Click the header so the drive's plays are present in the DOM.
            accordion_content.find_element_by_xpath('..').click()
            time.sleep(0.5)
    return all_drives
### POSSESSION DETERMINATION FUNCTIONS ###
def get_possessing_team_from_play_roster(play, game):
    """Infer the punting team by matching known punter names in the play text.

    Returns the team abbreviation, or '' when neither or both rosters match.
    """
    global punters
    home, away = get_home_team(game), get_away_team(game)
    text = play['text']
    home_possession = any(name in text for name in punters[home])
    away_possession = any(name in text for name in punters[away])
    # Ambiguous (both or neither matched): refuse to guess.
    if home_possession == away_possession:
        return ''
    return home if home_possession else away
def get_possessing_team_from_punt_distance(play, game):
    """Guess the punting team from punt-distance arithmetic.

    Compares the stated punt distance against the line of scrimmage to work
    out which side of the field the punt must have started from. Returns ''
    whenever the text doesn't fit the "X punts N yards ..." shape; any
    parsing error is swallowed deliberately.
    """
    try:
        split = play['text'].split(" ")
        if split[1] == 'punts':
            # Punt longer than the distance to the goal line: kicked from the
            # side named in the yard line.
            if int(split[2]) > int(play['yard_line'].split(" ")[1]):
                return play['yard_line'].split(" ")[0]
            if 'touchback' in play['text'].lower():
                punt_distance = int(split[2])
                if punt_distance > 50:
                    return play['yard_line'].split(" ")[0]
                else:
                    return return_other_team(game,
                                             play['yard_line'].split(" ")[0])
            # No touchback: add the return yardage (word 7) to the distance.
            punt_distance = int(split[2]) + int(split[6])
            if punt_distance > 50:
                return play['yard_line'].split(" ")[0]
            else:
                return return_other_team(game, play['yard_line'].split(" ")[0])
        return ''
    except BaseException:
        return ''
def get_possessing_team_from_drive(drive):
    """Read the possessing team from the drive header's team-logo URL.

    The three characters before ".png" in the logo src are the team code.
    """
    header = drive.find_element_by_xpath('../../..')
    logo = header.find_element_by_class_name('team-logo')
    # Some layouts nest the actual <img> inside the logo container.
    if logo.get_attribute("src") is None:
        logo = logo.find_element_by_tag_name('img')
    src = logo.get_attribute("src")
    end = src.find(".png")
    return src[end - 3:end].lstrip("/").upper()
def get_possessing_team(play, drive, game):
    """Best-effort possession lookup: roster names, then punt distance, then drive logo."""
    team = get_possessing_team_from_play_roster(play, game)
    if team == '':
        team = get_possessing_team_from_punt_distance(play, game)
    if team == '':
        team = get_possessing_team_from_drive(drive)
    return team
### TEAM ABBREVIATION FUNCTIONS ###
def get_abbreviations(game):
    """Return both team abbreviation strings shown on the page."""
    abbrev_elements = game.find_elements_by_class_name("abbrev")
    return get_inner_html_of_elements(abbrev_elements)
def get_home_team(game):
    """Home team abbreviation (second 'abbrev' element)."""
    abbrevs = get_abbreviations(game)
    return abbrevs[1]
def get_away_team(game):
    """Away team abbreviation (first 'abbrev' element)."""
    abbrevs = get_abbreviations(game)
    return abbrevs[0]
def return_other_team(game, team):
    """Return the opponent of `team` in this game."""
    home = get_home_team(game)
    if team == home:
        return get_away_team(game)
    return home
### GAME INFO FUNCTIONS ###
def get_game_id(game):
    # Slice the 9-character ESPN game id out of the driver's current URL.
    # NOTE(review): the fixed offsets [-14:-5] assume a specific URL tail
    # format -- verify against current ESPN gamecast URLs.
    return game.current_url[-14:-5]
def get_game_header(game):
    """Return the game-details header text, or '' when the element is absent."""
    header_elements = game.find_elements_by_css_selector('div.game-details.header')
    if not header_elements:
        return ""
    return get_inner_html_of_element(header_elements[0])
def is_final(game):
    """True when the status line reports the game as final."""
    status = get_inner_html_of_element(
        game.find_element_by_class_name("status-detail"))
    final = 'final' in status.lower()
    if debug:
        time_print(("is final", final))
    return final
def is_postseason(game):
    """True when the header names a playoff, championship, or Super Bowl game."""
    header = get_game_header(game).lower()
    postseason = any(
        keyword in header for keyword in ('playoff', 'championship', 'super bowl'))
    if debug:
        time_print(("is postseason", postseason))
    return postseason
### SCORE FUNCTIONS ###
def get_scores(game):
    """Return the innerHTML of each team's score container."""
    containers = game.find_elements_by_class_name("score-container")
    score_divs = [container.find_element_by_tag_name("div")
                  for container in containers]
    return get_inner_html_of_elements(score_divs)
def get_home_score(play, drive, drives, game):
    """Home team's score as of this drive (see get_drive_scores)."""
    _, home_score = get_drive_scores(drives, drives.index(drive), game)
    return home_score
def get_away_score(play, drive, drives, game):
    """Away team's score as of this drive (see get_drive_scores)."""
    away_score, _ = get_drive_scores(drives, drives.index(drive), game)
    return away_score
def get_drive_scores(drives, index, game):
    """Return (away_score, home_score) as of the drive at `index`.

    The score is read from a neighboring drive's accordion header -- the
    previous drive for final games and the next drive for live ones
    (presumably because each header shows the score *after* its drive and
    the two listings run in opposite order).
    """
    if is_final(game):
        if index == 0:
            drive = drives[0]
        else:
            drive = drives[index - 1]
    else:
        if index == len(drives) - 1:
            drive = drives[-1]
        else:
            drive = drives[index + 1]
    accordion_header = drive.find_element_by_xpath('../../..')
    away_parent = accordion_header.find_element_by_class_name(
        'home')  # this is intentional, ESPN is dumb
    home_parent = accordion_header.find_element_by_class_name(
        'away')  # this is intentional, ESPN is dumb
    away_score_element = away_parent.find_element_by_class_name('team-score')
    home_score_element = home_parent.find_element_by_class_name('team-score')
    away_score, home_score = int(
        get_inner_html_of_element(away_score_element)), int(
            get_inner_html_of_element(home_score_element))
    if debug:
        time_print(("away score", away_score))
        time_print(("home score", home_score))
    return away_score, home_score
### PLAY FUNCTIONS ###
def is_punt(play):
    """True when the play text describes a real punt (fake punts excluded)."""
    text = play['text'].lower()
    if 'fake punt' in text:
        return False
    punt_markers = ('punts', 'punt is blocked', 'punt for ')
    return any(marker in text for marker in punt_markers)
def is_penalty(play):
    """True when the play text mentions a penalty."""
    text = play['text']
    return 'penalty' in text.lower()
def get_yrdln_int(play):
    """Numeric part of the yard line: 'SEA 34' -> 34, '50' -> 50."""
    *_, number = play['yard_line'].split(" ")
    return int(number)
def get_field_side(play):
    """Territory abbreviation of the yard line, or None at midfield."""
    yard_line = play['yard_line']
    if '50' in yard_line:
        return None
    return yard_line.split(" ")[0]
def get_time_str(play):
    """Game-clock string ('MM:SS') at the start of the play."""
    clock = play['time']
    return clock
def get_qtr_num(play):
    """Quarter as an int; overtimes map to 5, 6, 7."""
    overtime_numbers = {'OT': 5, '2OT': 6, '3OT': 7}
    qtr = play['qtr']
    if qtr in overtime_numbers:
        return overtime_numbers[qtr]
    # Regular quarters arrive as '1st' .. '4th'; the leading digit suffices.
    return int(qtr[0])
def is_in_opposing_territory(play, drive, game):
    """True when the ball sits on the opponent's side of the field."""
    opposing = get_field_side(play) != get_possessing_team(play, drive, game)
    if debug:
        time_print(("is in opposing territory", opposing))
    return opposing
def get_dist_num(play):
    """Yards to go for a first down, as an int."""
    distance = play['dist']
    return int(distance)
### CALCULATION HELPER FUNCTIONS ###
def calc_seconds_from_time_str(time_str):
    """Convert a 'MM:SS' clock string to total seconds."""
    minutes_part, seconds_part = time_str.split(":")
    return int(minutes_part) * 60 + int(seconds_part)
def calc_seconds_since_halftime(play, game):
    """Seconds elapsed since halftime at the start of the play, floored at 0.

    Regular-season overtime is 10 minutes long; every other period is 15.
    """
    # Regular season games have only one overtime of length 10 minutes
    if not is_postseason(game) and get_qtr_num(play) == 5:
        seconds_elapsed_in_qtr = (10 * 60) - calc_seconds_from_time_str(
            get_time_str(play))
    else:
        seconds_elapsed_in_qtr = (15 * 60) - calc_seconds_from_time_str(
            get_time_str(play))
    # Quarters after the 2nd each add 15 minutes; the max() clamps
    # first-half plays to zero.
    seconds_since_halftime = max(
        seconds_elapsed_in_qtr + (15 * 60) * (get_qtr_num(play) - 3), 0)
    if debug:
        time_print(("seconds since halftime", seconds_since_halftime))
    return seconds_since_halftime
def calc_score_diff(play, drive, drives, game):
    """Score differential from the punting team's perspective (negative = losing)."""
    away, home = get_drive_scores(drives, drives.index(drive), game)
    punting_team_is_home = get_possessing_team(play, drive, game) == get_home_team(game)
    if punting_team_is_home:
        diff = int(home) - int(away)
    else:
        diff = int(away) - int(home)
    if debug:
        time_print(("score diff", diff))
    return diff
### SURRENDER INDEX FUNCTIONS ###
def calc_field_pos_score(play, drive, game):
    """Field-position factor of the Surrender Index.

    Grows exponentially as the punt spot nears midfield (1.1^x past a team's
    own 40) and faster still in opposing territory (1.2^x). Any parsing
    failure yields 0, which zeroes the whole index.
    """
    try:
        if get_yrdln_int(play) == 50:
            return (1.1)**10.
        if not is_in_opposing_territory(play, drive, game):
            # Own territory: flat 1.0 up to the 40, then exponential growth.
            return max(1., (1.1)**(get_yrdln_int(play) - 40))
        else:
            # Opposing territory: steeper base, on top of the midfield score.
            return (1.2)**(50 - get_yrdln_int(play)) * ((1.1)**(10))
    except BaseException:
        # Malformed play data -- deliberately treat as "not a cowardly punt".
        return 0.
def calc_yds_to_go_multiplier(play):
    """Distance factor: the longer the distance, the more excusable the punt."""
    dist = int(play['dist'])
    # (cutoff, multiplier) pairs checked from longest to shortest distance.
    tiers = ((10, 0.2), (7, 0.4), (4, 0.6), (2, 0.8))
    for cutoff, multiplier in tiers:
        if dist >= cutoff:
            return multiplier
    return 1.
def calc_score_multiplier(play, drive, drives, game):
    """Score factor: punting while tied or narrowly trailing is most cowardly."""
    diff = calc_score_diff(play, drive, drives, game)
    if diff > 0:
        return 1.
    if diff == 0:
        return 2.
    if diff < -8.:
        return 3.
    # Trailing by one score (8 points or fewer).
    return 4.
def calc_clock_multiplier(play, drive, drives, game):
    """Clock factor: grows cubically with time since halftime when not winning."""
    trailing_or_tied = calc_score_diff(play, drive, drives, game) <= 0
    if trailing_or_tied and get_qtr_num(play) > 2:
        elapsed = calc_seconds_since_halftime(play, game)
        return ((elapsed * 0.001)**3.) + 1.
    return 1.
def calc_surrender_index(play, drive, drives, game):
    """Surrender Index: product of field-position, distance, score, and clock factors."""
    field_pos = calc_field_pos_score(play, drive, game)
    yds_to_go = calc_yds_to_go_multiplier(play)
    score = calc_score_multiplier(play, drive, drives, game)
    clock = calc_clock_multiplier(play, drive, drives, game)
    if debug:
        time_print(play)
        time_print("")
        time_print(("field pos score", field_pos))
        time_print(("yds to go mult", yds_to_go))
        time_print(("score mult", score))
        time_print(("clock mult", clock))
    return field_pos * yds_to_go * score * clock
### PUNTER FUNCTIONS ###
def find_punters_for_team(team, roster):
    """Scrape 'F.Lastname'-style punter names from a team's ESPN roster page.

    `roster` is a webdriver reused across calls. Every special-teams table
    cell is scanned; cells without a player link are skipped silently.
    """
    base_link = 'https://www.espn.com/nfl/team/roster/_/name/'
    roster_link = base_link + team
    roster.get(roster_link)
    header = roster.find_element_by_css_selector("div.Special.Teams")
    parents = header.find_elements_by_css_selector(
        "td.Table__TD:not(.Table__TD--headshot)")
    punters = set()
    for parent in parents:
        try:
            ele = parent.find_element_by_class_name("AnchorLink")
            full_name = ele.get_attribute("innerHTML")
            split = full_name.split(" ")
            # Play-by-play text abbreviates names as "F.Lastname".
            first_initial_last = full_name[0] + '.' + split[-1]
            punters.add(first_initial_last)
        except BaseException:
            pass
    return punters
def download_punters():
    """Populate the global punter-name map, scraping ESPN at most every 12h.

    Results are cached in punters.json (team -> list of 'F.Lastname'); a
    fresh scrape walks every team's roster page with one shared webdriver.
    """
    global punters
    punters = {}
    if os.path.exists('punters.json'):
        file_mod_time = os.path.getmtime('punters.json')
    else:
        file_mod_time = 0.
    if time.time() - file_mod_time < 60 * 60 * 12:
        # if file modified within past 12 hours
        with open('punters.json', 'r') as f:
            punters_list = json.load(f)
        # JSON stores lists; convert back to sets for fast membership tests.
        for key, value in punters_list.items():
            punters[key] = set(value)
    else:
        team_abbreviations = [
            'ARI', 'ATL', 'BAL', 'BUF', 'CAR', 'CHI', 'CIN', 'CLE',
            'DAL', 'DEN', 'DET', 'GB', 'HOU', 'IND', 'JAX', 'KC',
            'LAC', 'LAR', 'LV', 'MIA', 'MIN', 'NE', 'NO', 'NYG',
            'NYJ', 'PHI', 'PIT', 'SEA', 'SF', 'TB', 'TEN', 'WSH',
        ]
        roster = get_game_driver()
        for team in team_abbreviations:
            time_print("Downloading punters for " + team)
            punters[team] = find_punters_for_team(team, roster)
        roster.quit()
        # Persist as lists, since sets are not JSON-serializable.
        punters_list = {}
        for key, value in punters.items():
            punters_list[key] = list(value)
        with open('punters.json', 'w') as f:
            json.dump(punters_list, f)
### STRING FORMAT FUNCTIONS ###
def get_pretty_time_str(time_str):
    """Strip a zero-padded minute: '07:12' -> '7:12'; '0:45' stays as-is."""
    has_padded_minute = time_str.startswith('0') and time_str[1] != ':'
    return time_str[1:] if has_padded_minute else time_str
def get_qtr_str(qtr):
    """Human-readable quarter: '2nd' -> 'the 2nd'; overtime strings pass through."""
    if 'OT' in qtr:
        return qtr
    return 'the ' + get_num_str(int(qtr[0]))
def get_ordinal_suffix(num):
    """Return the English ordinal suffix for `num`: 1 -> 'st', 2 -> 'nd', ...

    Generalized: integers ending in 11/12/13 now correctly get 'th' (11th,
    112th). Existing callers are unaffected -- get_num_str already guards
    those values itself, and rounded floats (99.1, 99.97, ...) still fall
    through to the last-digit rule as before.
    """
    # Teens are irregular: 11th/12th/13th, not 11st/12nd/13rd.
    if isinstance(num, int) and num % 100 in (11, 12, 13):
        return 'th'
    last_digit = str(num)[-1]
    if last_digit == '1':
        return 'st'
    elif last_digit == '2':
        return 'nd'
    elif last_digit == '3':
        return 'rd'
    else:
        return 'th'
def get_num_str(num):
    """Format a percentile as an ordinal string, e.g. 93 -> '93rd'.

    Values at the 99th percentile get extra decimal precision (99.4th,
    99.97th, 99.997th) so the most cowardly punts stay distinguishable.
    """
    rounded_num = int(num)  # round down
    # Irregular teen ordinals: 11th/12th/13th.
    if rounded_num % 100 == 11 or rounded_num % 100 == 12 or rounded_num % 100 == 13:
        return str(rounded_num) + 'th'
    # add more precision for 99th percentile
    if rounded_num == 99:
        if num < 99.9:
            return str(round(num, 1)) + get_ordinal_suffix(round(num, 1))
        elif num < 99.99:
            return str(round(num, 2)) + get_ordinal_suffix(round(num, 2))
        else:
            # round down (truncate to 3 decimal places rather than round)
            multiplied = int(num * 1000)
            rounded_down = float(multiplied) / 1000
            return str(rounded_down) + get_ordinal_suffix(rounded_down)
    return str(rounded_num) + get_ordinal_suffix(rounded_num)
def pretty_score_str(score_1, score_2):
    """Return 'winning/losing/tied X to Y' from the first team's perspective."""
    if score_1 > score_2:
        outcome = 'winning '
    elif score_1 < score_2:
        outcome = 'losing '
    else:
        outcome = 'tied '
    return outcome + str(score_1) + ' to ' + str(score_2)
def get_score_str(play, drive, drives, game):
    """Score phrase from the punting team's perspective."""
    punting_team_is_home = get_possessing_team(play, drive, game) == get_home_team(game)
    if punting_team_is_home:
        return pretty_score_str(get_home_score(play, drive, drives, game),
                                get_away_score(play, drive, drives, game))
    return pretty_score_str(get_away_score(play, drive, drives, game),
                            get_home_score(play, drive, drives, game))
### DELAY OF GAME FUNCTIONS ###
def is_delay_of_game(play, prev_play):
    """True when the previous play drew a delay-of-game flag that pushed the offense back."""
    flagged = 'delay of game' in prev_play['text'].lower()
    pushed_back = int(play['dist']) > int(prev_play['dist'])
    return flagged and pushed_back
### HISTORY FUNCTIONS ###
def has_been_tweeted(play, drive, game, game_id):
    """True when a play matching this one was already tweeted for this game.

    Matching is deliberately fuzzy: same possessing team and quarter, game
    clock within 50 seconds -- so a play re-scraped with a slightly
    different clock isn't tweeted twice.
    """
    global tweeted_plays
    game_plays = tweeted_plays.get(game_id, [])
    for old_play in list(game_plays):
        old_possessing_team, old_qtr, old_time = old_play.split('_')
        new_possessing_team, new_qtr, new_time = play_hash(play, drive,
                                                           game).split('_')
        if old_possessing_team == new_possessing_team and old_qtr == new_qtr and abs(
                calc_seconds_from_time_str(old_time) -
                calc_seconds_from_time_str(new_time)) < 50:
            # Check if the team with possession and quarter are the same, and
            # if the game clock at the start of the play is within 50 seconds.
            return True
    return False
def has_been_seen(play, drive, game, game_id):
    """Record this play in the seen-plays log; True if it was already there."""
    global seen_plays
    game_plays = seen_plays.get(game_id, [])
    signature = deep_play_hash(play, drive, game)
    if signature in game_plays:
        return True
    game_plays.append(signature)
    seen_plays[game_id] = game_plays
    return False
def penalty_has_been_seen(play, drive, game, game_id):
    """Record this play in the penalty log; True if it was already there."""
    global penalty_seen_plays
    game_plays = penalty_seen_plays.get(game_id, [])
    signature = deep_play_hash(play, drive, game)
    if signature in game_plays:
        return True
    game_plays.append(signature)
    penalty_seen_plays[game_id] = game_plays
    return False
def has_been_final(game_id):
    """Record that this game reached a final state; True if already recorded."""
    global final_games
    already_final = game_id in final_games
    final_games.add(game_id)
    return already_final
def play_hash(play, drive, game):
    """Coarse play key: possessing team, quarter, and clock, '_'-joined."""
    parts = (get_possessing_team(play, drive, game), play['qtr'], play['time'])
    return '_'.join(parts)
def deep_play_hash(play, drive, game):
    """Fine-grained play key also including down, distance, and yard line."""
    parts = (
        get_possessing_team(play, drive, game),
        play['qtr'],
        play['time'],
        play['down'],
        play['dist'],
        play['yard_line'],
    )
    return '_'.join(parts)
def load_tweeted_plays_dict():
    """Initialize the tweeted-plays log from disk.

    Reuses tweeted_plays.json only when it was modified within the last 12
    hours; otherwise starts fresh and truncates the file -- presumably
    because entries older than that belong to finished games.
    """
    global tweeted_plays
    tweeted_plays = {}
    if os.path.exists('tweeted_plays.json'):
        file_mod_time = os.path.getmtime('tweeted_plays.json')
    else:
        file_mod_time = 0.
    if time.time() - file_mod_time < 60 * 60 * 12:
        # if file modified within past 12 hours
        with open('tweeted_plays.json', 'r') as f:
            tweeted_plays = json.load(f)
    else:
        with open('tweeted_plays.json', 'w') as f:
            json.dump(tweeted_plays, f)
def update_tweeted_plays(play, drive, game, game_id):
    """Append this play's hash to the per-game tweet log and persist to disk."""
    global tweeted_plays
    game_plays = tweeted_plays.setdefault(game_id, [])
    game_plays.append(play_hash(play, drive, game))
    with open('tweeted_plays.json', 'w') as f:
        json.dump(tweeted_plays, f)
### PERCENTILE FUNCTIONS ###
def load_historical_surrender_indices():
    """Load the precomputed 1999-2020 surrender indices from disk."""
    with open('1999-2020_surrender_indices.npy', 'rb') as f:
        indices = np.load(f)
    return indices
def load_current_surrender_indices():
    """Load this season's surrender indices, or an empty array when unavailable."""
    try:
        with open('current_surrender_indices.npy', 'rb') as f:
            indices = np.load(f)
        return indices
    except BaseException:
        # Missing or unreadable file: treat the season as having no punts yet.
        return np.array([])
def write_current_surrender_indices(surrender_indices):
    """Persist this season's surrender indices to disk."""
    out_path = 'current_surrender_indices.npy'
    with open(out_path, 'wb') as f:
        np.save(f, surrender_indices)
def calculate_percentiles(surrender_index, should_update_file=True):
    """Rank a surrender index against the current season and all of history.

    Returns (current_season_percentile, all_time_percentile). When
    should_update_file is True, the index is appended to the season file so
    later punts are ranked against this one too.
    """
    global historical_surrender_indices
    current_surrender_indices = load_current_surrender_indices()
    # kind='strict' counts only strictly-lower scores.
    current_percentile = stats.percentileofscore(current_surrender_indices,
                                                 surrender_index,
                                                 kind='strict')
    all_surrender_indices = np.concatenate(
        (historical_surrender_indices, current_surrender_indices))
    historical_percentile = stats.percentileofscore(all_surrender_indices,
                                                    surrender_index,
                                                    kind='strict')
    if should_update_file:
        current_surrender_indices = np.append(current_surrender_indices,
                                              surrender_index)
        write_current_surrender_indices(current_surrender_indices)
    return current_percentile, historical_percentile
### TWITTER FUNCTIONS ###
def initialize_api():
    """Create the three Tweepy clients from credentials.json.

    Returns (main api, 90th-percentile api, cancel api); each account's keys
    are stored under a distinct prefix in the credentials file.
    """
    with open('credentials.json', 'r') as f:
        credentials = json.load(f)
    clients = []
    for prefix in ('', '90_', 'cancel_'):
        auth = tweepy.OAuthHandler(credentials[prefix + 'consumer_key'],
                                   credentials[prefix + 'consumer_secret'])
        auth.set_access_token(credentials[prefix + 'access_token'],
                              credentials[prefix + 'access_token_secret'])
        clients.append(tweepy.API(auth))
    return tuple(clients)
def initialize_gmail_client():
    """Build an authorized Gmail API client, caching OAuth tokens on disk.

    First run performs an interactive browser consent flow; subsequent runs
    reuse (and refresh) the pickled token.
    """
    with open('credentials.json', 'r') as f:
        credentials = json.load(f)
    SCOPES = ['https://www.googleapis.com/auth/gmail.compose']
    # NOTE(review): `email` is assigned but never used here (send_message
    # re-reads credentials itself); kept only for the KeyError it would
    # raise on a misconfigured credentials file -- confirm intent.
    email = credentials['gmail_email']
    creds = None
    # Reuse a previously stored token when one exists.
    if os.path.exists("gmail_token.pickle"):
        with open("gmail_token.pickle", "rb") as token:
            creds = pickle.load(token)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            # Interactive OAuth consent flow on first run.
            flow = InstalledAppFlow.from_client_secrets_file(
                'gmail_credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        with open("gmail_token.pickle", "wb") as token:
            pickle.dump(creds, token)
    return build('gmail', 'v1', credentials=creds)
def initialize_twilio_client():
    """Create a Twilio REST client from credentials.json."""
    with open('credentials.json', 'r') as f:
        creds = json.load(f)
    return Client(creds['twilio_account_sid'], creds['twilio_auth_token'])
def send_message(body):
    """Send a notification via Twilio SMS, Apple Mail, or the Gmail API.

    The channel is selected by the notify_using_* globals; in every case the
    destination address/number comes from credentials.json.
    """
    global gmail_client
    global twilio_client
    global notify_using_twilio
    with open('credentials.json', 'r') as f:
        credentials = json.load(f)
    if notify_using_twilio:
        message = twilio_client.messages.create(
            body=body,
            from_=credentials['from_phone_number'],
            to=credentials['to_phone_number'])
    elif notify_using_native_mail:
        # macOS-only path: drive Mail.app via AppleScript, keeping it hidden.
        script = """tell application "Mail"
set newMessage to make new outgoing message with properties {{visible:false, subject:"{}", sender:"{}", content:"{}"}}
tell newMessage
make new to recipient with properties {{address:"{}"}}
end tell
send newMessage
end tell
tell application "System Events"
set visible of application process "Mail" to false
end tell
"""
        formatted_script = script.format(
            body, credentials['gmail_email'], body, credentials['gmail_email'])
        p = Popen('/usr/bin/osascript', stdin=PIPE,
                  stdout=PIPE, encoding='utf8')
        p.communicate(formatted_script)
    else:
        # Gmail API path: the message is sent to/from the same account.
        message = MIMEText(body)
        message['to'] = credentials['gmail_email']
        message['from'] = credentials['gmail_email']
        message['subject'] = body
        message_obj = {'raw': urlsafe_b64encode(message.as_bytes()).decode()}
        gmail_client.users().messages().send(userId="me", body=message_obj).execute()
def send_heartbeat_message(should_repeat=True):
    """Text an 'alive' heartbeat; repeats every 24h unless should_repeat is False.

    With should_repeat=True this never returns, so run it on its own thread.
    """
    global should_text
    while True:
        if should_text:
            send_message("The Surrender Index script is up and running.")
        if not should_repeat:
            return
        time.sleep(60 * 60 * 24)
def send_error_message(e, body="An error occurred"):
    """Text an error notification when texting is enabled."""
    global should_text
    if not should_text:
        return
    send_message(body + ": " + str(e) + ".")
def create_delay_of_game_str(play, drive, game, prev_play,
                             unadjusted_surrender_index,
                             unadjusted_current_percentile,
                             unadjusted_historical_percentile):
    """Build the reply tweet explaining a (likely intentional) delay of game.

    Describes how the penalty moved the spot and what the Surrender Index
    would have been had the penalty been unintentional. Output is unchanged
    from before; the old new_territory_str/old_territory_str locals were
    dead code (computed but never used) and have been removed.

    Note: unadjusted_historical_percentile is accepted for interface parity
    with the caller but is not used in the message.
    """
    penalty_str = "*" + get_possessing_team(
        play, drive,
        game) + " committed a (likely intentional) delay of game penalty, "
    old_yrdln_str = "moving the play from " + prev_play[
        'down'] + ' & ' + prev_play['dist'] + " at the " + prev_play[
            'yard_line']
    new_yrdln_str = " to " + play['down'] + ' & ' + play[
        'dist'] + " at the " + play['yard_line'] + ".\n\n"
    index_str = "If this penalty was in fact unintentional, the Surrender Index would be " + str(
        round(unadjusted_surrender_index, 2)) + ", "
    percentile_str = "ranking at the " + get_num_str(
        unadjusted_current_percentile) + " percentile of the 2021 season."
    return penalty_str + old_yrdln_str + new_yrdln_str + index_str + percentile_str
def create_tweet_str(play,
                     drive,
                     drives,
                     game,
                     surrender_index,
                     current_percentile,
                     historical_percentile,
                     delay_of_game=False):
    """Compose the main tweet: the punt situation plus its Surrender Index.

    When delay_of_game is True, asterisks mark the spot/distance figures
    that were adjusted back to their pre-penalty values (explained in the
    companion reply tweet).
    """
    territory_str = '50' if get_yrdln_int(play) == 50 else play['yard_line']
    asterisk = '*' if delay_of_game else ''
    decided_str = get_possessing_team(
        play, drive, game) + ' decided to punt to ' + return_other_team(
            game, get_possessing_team(play, drive, game))
    yrdln_str = ' from the ' + territory_str + asterisk + ' on '
    down_str = play['down'] + ' & ' + play['dist'] + asterisk
    clock_str = ' with ' + get_pretty_time_str(play['time']) + ' remaining in '
    qtr_str = get_qtr_str(play['qtr']) + ' while ' + get_score_str(
        play, drive, drives, game) + '.'
    play_str = decided_str + yrdln_str + down_str + clock_str + qtr_str
    surrender_str = 'With a Surrender Index of ' + str(
        round(surrender_index, 2)
    ) + ', this punt ranks at the ' + get_num_str(
        current_percentile
    ) + ' percentile of cowardly punts of the 2021 season, and the ' + get_num_str(
        historical_percentile) + ' percentile of all punts since 1999.'
    return play_str + '\n\n' + surrender_str
def tweet_play(play, prev_play, drive, drives, game, game_id):
    """Compute the Surrender Index for a punt and tweet it.

    If the previous play was a (likely intentional) delay of game, the index
    is computed from the pre-penalty distance/spot and a reply tweet
    explains the adjustment. Punts at or above the 90th percentile are
    mirrored on the 90th-percentile account, which also starts the
    cancellation-poll workflow on a background thread.
    """
    global api
    global ninety_api
    global cancel_api
    global should_tweet
    delay_of_game = is_delay_of_game(play, prev_play)
    if delay_of_game:
        # Score the punt as if the penalty had not moved the ball.
        updated_play = play.copy()
        updated_play['dist'] = prev_play['dist']
        updated_play['yard_line'] = prev_play['yard_line']
        surrender_index = calc_surrender_index(updated_play, drive, drives,
                                               game)
        current_percentile, historical_percentile = calculate_percentiles(
            surrender_index)
        # Unadjusted numbers for the explanatory reply, computed without
        # persisting them to the season file.
        unadjusted_surrender_index = calc_surrender_index(
            play, drive, drives, game)
        unadjusted_current_percentile, unadjusted_historical_percentile = calculate_percentiles(
            unadjusted_surrender_index, should_update_file=False)
        tweet_str = create_tweet_str(updated_play, drive, drives, game,
                                     surrender_index, current_percentile,
                                     historical_percentile, delay_of_game)
    else:
        surrender_index = calc_surrender_index(play, drive, drives, game)
        current_percentile, historical_percentile = calculate_percentiles(
            surrender_index)
        tweet_str = create_tweet_str(play, drive, drives, game,
                                     surrender_index, current_percentile,
                                     historical_percentile, delay_of_game)
    time_print(tweet_str)
    if delay_of_game:
        delay_of_game_str = create_delay_of_game_str(
            play, drive, game, prev_play, unadjusted_surrender_index,
            unadjusted_current_percentile, unadjusted_historical_percentile)
        time_print(delay_of_game_str)
    if should_tweet:
        status = api.update_status(tweet_str)
        if delay_of_game:
            api.update_status(delay_of_game_str,
                              in_reply_to_status_id=status.id_str)
    # Post the status to the 90th percentile account.
    if current_percentile >= 90. and should_tweet:
        ninety_status = ninety_api.update_status(tweet_str)
        if delay_of_game:
            ninety_api.update_status(
                delay_of_game_str, in_reply_to_status_id=ninety_status.id_str)
        # Kick off the hour-long cancellation poll in the background.
        thread = threading.Thread(target=handle_cancel,
                                  args=(ninety_status._json, tweet_str))
        thread.start()
    update_tweeted_plays(play, drive, game, game_id)
### CANCEL FUNCTIONS ###
def post_reply_poll(link):
    """Reply to the 90th-percentile tweet with a one-hour yes/no cancel poll.

    Drives a logged-in browser session (presumably because polls cannot be
    posted through the API used elsewhere).
    """
    driver = get_twitter_driver(link)
    driver.find_element_by_xpath("//div[@aria-label='Reply']").click()
    driver.find_element_by_xpath("//div[@aria-label='Add poll']").click()
    driver.find_element_by_name("Choice1").send_keys("Yes")
    driver.find_element_by_name("Choice2").send_keys("No")
    # Poll duration: 0 days, 1 hour, 0 minutes.
    Select(driver.find_element_by_xpath(
        "//select[@aria-label='Days']")).select_by_visible_text("0")
    Select(driver.find_element_by_xpath(
        "//select[@aria-label='Hours']")).select_by_visible_text("1")
    Select(driver.find_element_by_xpath(
        "//select[@aria-label='Minutes']")).select_by_visible_text("0")
    driver.find_element_by_xpath("//div[@aria-label='Tweet text']").send_keys(
        "Should this punt's Surrender Index be canceled?")
    driver.find_element_by_xpath("//div[@data-testid='tweetButton']").click()
    # Give the tweet time to post before tearing the session down.
    time.sleep(10)
    driver.close()
def check_reply(link):
    """Scrape the poll under the tweet at *link* after it closes.

    Returns True when at least two thirds voted "Yes", False otherwise,
    and None if the page did not yield exactly two percentage values.
    """
    time.sleep(61 * 60)  # Wait one hour and one minute to check reply
    driver = get_game_driver(headless=False)
    driver.get(link)
    time.sleep(3)
    # Locate the poll via its "... votes" label, then walk up to the
    # surrounding container and read the percentage spans.
    poll_title = driver.find_element_by_xpath("//*[contains(text(), 'votes')]")
    poll_content = poll_title.find_element_by_xpath("./../../../..")
    poll_result = poll_content.find_elements_by_tag_name("span")
    # NOTE(review): the span indices below assume Twitter's current DOM
    # layout -- verify against the live page if this stops working.
    poll_values = [poll_result[2], poll_result[5]]
    poll_floats = list(
        map(lambda x: float(x.get_attribute("innerHTML").strip('%')),
            poll_values))
    driver.close()
    time_print(("checking poll results: ", poll_floats))
    return poll_floats[0] >= 66.67 if len(poll_floats) == 2 else None
def cancel_punt(orig_status, full_text):
    """Cancel a previously tweeted punt.

    Deletes the original tweet from the 90th-percentile account, posts
    the full text to the cancellation account, then tweets a link to
    that cancellation from the 90th-percentile account.
    """
    global ninety_api
    global cancel_api
    ninety_api.destroy_status(orig_status['id'])
    cancel_status = cancel_api.update_status(full_text)._json
    new_cancel_text = 'CANCELED https://twitter.com/CancelSurrender/status/' + cancel_status[
        'id_str']
    # Brief pause so the cancellation tweet is live before linking to it.
    time.sleep(10)
    ninety_api.update_status(new_cancel_text)
def handle_cancel(orig_status, full_text):
    """Run the full cancellation vote for a 90th-percentile tweet.

    Posts a reply poll, waits for it to finish, and cancels the punt if
    the vote passes.  Errors are logged and reported but never
    propagated, because this runs in a background thread.
    """
    try:
        orig_link = 'https://twitter.com/surrender_idx90/status/' + orig_status[
            'id_str']
        post_reply_poll(orig_link)
        if check_reply(orig_link):
            cancel_punt(orig_status, full_text)
    except Exception as e:
        traceback.print_exc()
        time_print("An error occurred when trying to handle canceling a tweet")
        time_print(orig_status)
        time_print(e)
        send_error_message(
            e, "An error occurred when trying to handle canceling a tweet")
### CURRENT GAME FUNCTIONS ###
def time_print(message):
    """Print *message* prefixed with the current formatted time."""
    print(f"{get_current_time_str()}: {message}")
def get_current_time_str():
    """Return the current local time, e.g. 'Jan 2 at 3:04:05 PM'.

    Note: the '%-d' / '%-I' (no-padding) directives are glibc-specific.
    """
    now = datetime.now()
    return now.strftime("%b %-d at %-I:%M:%S %p")
def get_now():
    """Return a timezone-aware 'now' in the local timezone."""
    local_tz = tz.gettz()
    return datetime.now(tz=local_tz)
def update_current_year_games():
    """Refresh the global list of this season's NFL games from ESPN.

    Downloads every scoreboard page for the season that was in progress
    two months ago and stores all of their events in
    ``current_year_games``.  Each download is retried with exponential
    backoff until it succeeds.
    """
    global current_year_games
    two_months_ago = get_now() - timedelta(days=60)
    scoreboard_urls = espn.get_all_scoreboard_urls("nfl", two_months_ago.year)
    current_year_games = []
    for scoreboard_url in scoreboard_urls:
        data = None
        backoff_time = 1.
        while data is None:
            try:
                data = espn.get_url(scoreboard_url)
            except Exception:
                # Fix: was `except BaseException`, which also swallowed
                # KeyboardInterrupt/SystemExit and made the bot
                # uninterruptible while a download kept failing.
                time.sleep(backoff_time)
                backoff_time *= 2.
        for event in data['content']['sbData']['events']:
            current_year_games.append(event)
def get_active_game_ids():
    """Return the set of ESPN game ids that should be polled right now.

    A game is active when it starts within the next 15 minutes, started
    no more than 6 hours ago, and has not already been marked completed.
    """
    global current_year_games
    global completed_game_ids

    now = get_now()
    active_game_ids = set()
    for game in current_year_games:
        # Ignore any games that are marked completed (which is done by
        # checking if ESPN says final).
        if game['id'] in completed_game_ids:
            continue
        game_time = parser.parse(
            game['date']).replace(tzinfo=timezone.utc).astimezone(tz=None)
        starts_soon = game_time - timedelta(minutes=15) < now
        not_too_old = game_time + timedelta(hours=6) > now
        if starts_soon and not_too_old:
            active_game_ids.add(game['id'])
    return active_game_ids
def clean_games(active_game_ids):
    """Quit and drop Selenium drivers for games that are over or inactive.

    A game's driver is removed when the game is no longer in
    *active_game_ids*, or (unless final checking is disabled) when ESPN
    reports it final twice in a row -- immediately if
    ``clean_immediately`` is set.
    """
    global games
    global clean_immediately
    global disable_final_check
    global completed_game_ids
    for game_id in list(games.keys()):
        if game_id not in active_game_ids:
            games[game_id].quit()
            del games[game_id]
            # Fix: skip the final-check below -- this id's driver no
            # longer exists, so games[game_id] would raise KeyError.
            continue
        if not disable_final_check:
            if is_final(games[game_id]):
                if has_been_final(game_id) or clean_immediately:
                    completed_game_ids.add(game_id)
                    games[game_id].quit()
                    del games[game_id]
def download_data_for_active_games():
    """Ensure one Selenium driver per active game, then run one polling pass.

    Opens a play-by-play page for each newly active game, sleeps if no
    games are active, removes finished games, and finally invokes
    live_callback() to process all open games.
    """
    global games
    active_game_ids = get_active_game_ids()
    if len(active_game_ids) == 0:
        time_print("No games active. Sleeping for 15 minutes...")
        time.sleep(14 * 60)  # We sleep for another minute in the live callback
    game_added = False
    for game_id in active_game_ids:
        if game_id not in games:
            # New active game: open its ESPN play-by-play page.
            game = get_game_driver()
            base_link = 'https://www.espn.com/nfl/playbyplay?gameId='
            game_link = base_link + game_id
            game.get(game_link)
            games[game_id] = game
            game_added = True
    if game_added:
        time_print("Sleeping 10 seconds for game to load")
        time.sleep(10)
    clean_games(active_game_ids)
    live_callback()
### MAIN FUNCTIONS ###
def live_callback():
    """Scan every open game for new punts and tweet qualifying ones.

    For each game, walks all drives and plays, filters to punts that
    have not been tweeted yet and that have been seen on a previous
    pass, then tweets them.  The whole pass is throttled to take at
    least 60 seconds.
    """
    global games
    start_time = time.time()
    for game_id, game in games.items():
        try:
            time_print('Getting data for game ID ' + game_id)
            drives = get_all_drives(game)
            for index, drive in enumerate(drives):
                num_printed = 0
                drive_plays = get_plays_from_drive(drive, game)
                for play_index, play in enumerate(drive_plays):
                    # In debug mode, print the first plays of the first drive.
                    if debug and index == 0 and num_printed < 3:
                        time_print(play['text'])
                        num_printed += 1
                    if not is_punt(play):
                        continue
                    if is_penalty(play):
                        # Penalty punts are only considered at the edge of a
                        # drive; the relevant edge depends on play ordering,
                        # which differs between live and final box scores.
                        if is_final(game):
                            if play_index != len(drive_plays) - 1:
                                continue
                        else:
                            if play_index != 0:
                                continue
                        if not penalty_has_been_seen(play, drive, game,
                                                     game_id):
                            continue
                    if has_been_tweeted(play, drive, game, game_id):
                        continue
                    if not has_been_seen(play, drive, game, game_id):
                        continue
                    # The previous play is the adjacent one; direction depends
                    # on whether the page lists plays oldest- or newest-first.
                    if is_final(game):
                        prev_play = drive_plays[play_index -
                                                1] if play_index > 0 else play
                    else:
                        prev_play = drive_plays[play_index +
                                                1] if play_index + 1 < len(drive_plays) else play
                    tweet_play(play, prev_play, drive, drives, game, game_id)
            time_print("Done getting data for game ID " + game_id)
        except StaleElementReferenceException:
            # Page re-rendered under us; abort this pass and retry later.
            time_print("stale element, sleeping for 1 second.")
            time.sleep(1)
            return
    # Throttle: ensure at least one minute between polling passes.
    while (time.time() < start_time + 60):
        time.sleep(1)
def main():
    """Entry point: parse CLI flags, initialize the Twitter APIs and
    notification clients, then poll ESPN play-by-play pages in a daily
    loop (reset at 5 AM), tweeting qualifying punts as they appear.
    """
    global api
    global ninety_api
    global cancel_api
    global historical_surrender_indices
    global should_text
    global should_tweet
    global notify_using_native_mail
    global notify_using_twilio
    global final_games
    global debug
    global not_headless
    global clean_immediately
    global disable_final_check
    global sleep_time
    global seen_plays
    global penalty_seen_plays
    global gmail_client
    global twilio_client
    global completed_game_ids
    # Command-line flags.
    parser = argparse.ArgumentParser(
        description="Run the Surrender Index bot.")
    parser.add_argument('--disableTweeting',
                        action='store_true',
                        dest='disableTweeting')
    parser.add_argument('--disableNotifications',
                        action='store_true',
                        dest='disableNotifications')
    parser.add_argument('--notifyUsingTwilio',
                        action='store_true',
                        dest='notifyUsingTwilio')
    parser.add_argument('--debug', action='store_true', dest='debug')
    parser.add_argument('--notHeadless', action='store_true', dest='notHeadless')
    parser.add_argument('--disableFinalCheck',
                        action='store_true',
                        dest='disableFinalCheck')
    args = parser.parse_args()
    should_tweet = not args.disableTweeting
    should_text = not args.disableNotifications
    notify_using_twilio = args.notifyUsingTwilio
    # Native mail notifications are only available on macOS.
    notify_using_native_mail = sys.platform == "darwin" and not notify_using_twilio
    debug = args.debug
    not_headless = args.notHeadless
    disable_final_check = args.disableFinalCheck
    print("Tweeting Enabled" if should_tweet else "Tweeting Disabled")
    api, ninety_api, cancel_api = initialize_api()
    historical_surrender_indices = load_historical_surrender_indices()
    sleep_time = 1
    clean_immediately = True
    completed_game_ids = set()
    final_games = set()
    should_continue = True
    while should_continue:
        try:
            # Keep chromedriver in sync with the installed Chrome.
            chromedriver_autoinstaller.install()
            # update current year games and punters at 5 AM every day
            if notify_using_twilio:
                twilio_client = initialize_twilio_client()
            elif not notify_using_native_mail:
                gmail_client = initialize_gmail_client()
            send_heartbeat_message(should_repeat=False)
            update_current_year_games()
            download_punters()
            load_tweeted_plays_dict()
            seen_plays, penalty_seen_plays = {}, {}
            now = get_now()
            # Compute the next 5 AM boundary; the inner loop runs until then.
            if now.hour < 5:
                stop_date = now.replace(hour=5,
                                        minute=0,
                                        second=0,
                                        microsecond=0)
            else:
                now += timedelta(days=1)
                stop_date = now.replace(hour=5,
                                        minute=0,
                                        second=0,
                                        microsecond=0)
            while get_now() < stop_date:
                # NOTE(review): start_time is assigned but never used here.
                start_time = time.time()
                download_data_for_active_games()
                clean_immediately = False
                sleep_time = 1.
        except KeyboardInterrupt:
            should_continue = False
        except Exception as e:
            # When an exception occurs: log it, send a message, and sleep for an
            # exponential backoff time
            traceback.print_exc()
            time_print("Error occurred:")
            time_print(e)
            time_print("Sleeping for " + str(sleep_time) + " minutes")
            send_error_message(e)
            time.sleep(sleep_time * 60)
            sleep_time *= 2
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3379109 | import queue
from time import sleep
from typing import Any, Dict, List, Optional, Tuple
import zmq # type: ignore
from virtualgrid.base_node import BaseNode
from virtualgrid.job import Job
from virtualgrid.messages import (JobNotAcceptedMessage, GetLoadMessage, GetStatusMessage, JobAcceptedMessage,
JobStartedMessage, LoadMessage, JobMessage, StartJobMessage, StatusMessage,
UnknownMessageError, GiveJobMessage, OptionalJobMessage)
from virtualgrid.vector_clock import VectorClock
SCHEDULER_SLEEP_TIME = 5 # seconds
class ResourceManager(BaseNode):
    """Cluster-level scheduler node.

    Accepts jobs into a bounded queue, schedules queued jobs onto idle
    worker nodes, and offloads jobs to the grid scheduler whenever the
    local queue is full.
    """

    def __init__(self,
                 id_: int,
                 max_job_queue_size: int,
                 gs_address: str,
                 node_addresses: List[str],
                 port: int,
                 clock: VectorClock
                 ) -> None:
        super().__init__(clock, port)
        self.context = zmq.Context()
        self.id_ = id_
        # Address of the grid scheduler used for offloading jobs.
        self.gs_address = gs_address
        # Addresses of the worker nodes belonging to this cluster.
        self.node_addresses = node_addresses
        # Bounded FIFO of jobs waiting to be scheduled on a node.
        self.job_queue: queue.Queue = queue.Queue(max_job_queue_size)

    def run_job_scheduler(self) -> None:
        """
        Run the job scheduler.

        This method tries to schedule a jobs using _schedule_job().
        If scheduling a job is successful, it tries to schedule another
        one immediately. In case it fails, it sleeps for SCHEDULER_SLEEP_TIME
        seconds.
        """
        timestamp = self.clock.register_event()
        log_message = "Started the job scheduler"
        self._log(timestamp, log_message)
        while True:
            job, node_address = self._schedule_job()
            timestamp = self.clock.register_event()
            if job:
                log_message = f"Scheduled job {job} on the node {node_address}"
                self._log(timestamp, log_message)
            else:
                log_message = f"Couldn't schedule a job, waiting {SCHEDULER_SLEEP_TIME}s"
                self._log(timestamp, log_message)
                sleep(SCHEDULER_SLEEP_TIME)

    def _schedule_job(self) -> Tuple[Optional[Job], Optional[str]]:
        """
        Try to schedule a job from the queue.

        Returns a tuple containing the scheduled job and the address of the node
        on which it was scheduled.

        If scheduling was not successful (i.e. there was no job in the queue
        or there was no free node to schedule it on), two Nones are returned.
        """
        if self.job_queue.empty():
            return None, None
        # NOTE(review): there is a window between picking the idle node
        # and dequeuing the job in which the node could become busy.
        node_address = self._get_idle_node()
        if node_address is None:
            return None, None
        try:
            job = self.job_queue.get(block=False)
        except queue.Empty:
            return None, None
        self._submit_job(job, node_address)
        return job, node_address

    def _handle_valid_message(self, message: Any) -> Any:
        """
        Handle a valid (already unpickled) message, and return a message
        to send back.

        In case there's no action associated with the message,
        UnknownMessageError is sent back.
        """
        if isinstance(message, GetLoadMessage):
            self.clock.receive_message(message.clock)
            return self._make_message(LoadMessage, self.get_load())
        elif isinstance(message, JobMessage):
            timestamp = self.clock.receive_message(message.clock)
            log_message = f"Received job {message.job}"
            self._log(timestamp, log_message)
            queued = self._queue_job(message.job)
            if queued:
                return self._make_message(JobAcceptedMessage)
            else:
                # Local queue full: try to hand the job to the grid scheduler.
                return self._offload_job_to_gs(message.job)
        elif isinstance(message, GiveJobMessage):
            timestamp = self.clock.receive_message(message.clock)
            log_message = "Received give job message"
            self._log(timestamp, log_message)
            try:
                job = self.job_queue.get(block=False)
            except queue.Empty:
                job = None
            return self._make_message(OptionalJobMessage, job)
        else:
            timestamp = self.clock.register_event()
            log_message = f"Received unknown message: {message}"
            self._log(timestamp, log_message)
            return self._make_message(UnknownMessageError)

    def _queue_job(self, job: Job) -> bool:
        """
        Try to schedule a job using this resource manager. Return value True
        means it was successful, False means it wasn't.

        If a job can be scheduled on this cluster, the job is accepted by this
        resource manager.
        """
        try:
            self.job_queue.put(job, block=False)
            timestamp = self.clock.register_event()
            message = f"Queued {job}"
            self._log(timestamp, message)
            return True
        except queue.Full:
            return False

    def _offload_job_to_gs(self, job: Job) -> Any:
        """
        Offload a job to the grid scheduler.

        A situation like that can happen whenever the resource manager thinks
        he can't handle more jobs, and wants this job to be scheduled on a
        different cluster by the grid scheduler.
        """
        timestamp = self.clock.register_event()
        log_message = f"Can't accept job ID={job.id}, offloading it to the grid scheduler"
        self._log(timestamp, log_message)
        socket = self.context.socket(zmq.REQ)
        socket.connect(f'tcp://{self.gs_address}')
        socket.RCVTIMEO = 1000
        message = self._make_message(JobMessage, job, rm_id=self.id_)
        try:
            socket.send_pyobj(message)
            response = socket.recv_pyobj()
        except zmq.error.Again:
            timestamp = self.clock.register_event()
            # Fixed log message: removed stray closing parenthesis.
            log_message = f"Cannot offload to GS {self.gs_address}"
            self._log(timestamp, log_message)
            return self._make_message(JobNotAcceptedMessage)
        if isinstance(response, JobAcceptedMessage):
            timestamp = self.clock.receive_message(response.clock)
            log_message = f"Offloaded job {job.id} to GS {self.gs_address}"
            self._log(timestamp, log_message)
            return self._make_message(JobAcceptedMessage)
        elif isinstance(response, JobNotAcceptedMessage):
            timestamp = self.clock.receive_message(response.clock)
            log_message = f"Could not offload job {job.id} to GS {self.gs_address}"
            self._log(timestamp, log_message)
            return self._make_message(JobNotAcceptedMessage)
        else:
            timestamp = self.clock.register_event()
            # Fixed log message typos ("unexepected resonse").
            log_message = f"Got unexpected response {response} from GS {self.gs_address}"
            self._log(timestamp, log_message)
            return self._make_message(UnknownMessageError)

    def _get_statuses(self) -> Dict[str, str]:
        """
        Query each node for its status, and return a dictionary
        which keys are node addresses and values are their statuses.

        A node is omitted if it sends an incomprehensible message.
        """
        statuses = {}
        for node_address in self.node_addresses:
            socket = self.context.socket(zmq.REQ)
            socket.connect(f'tcp://{node_address}')
            # TODO: Handle unpickling errors
            status_message = self._make_message(GetStatusMessage)
            socket.send_pyobj(status_message)
            response = socket.recv_pyobj()
            if isinstance(response, StatusMessage):
                self.clock.receive_message(response.clock)
                statuses[node_address] = response.status
            else:
                timestamp = self.clock.register_event()
                log_message = f"Received: {response} (expected: {StatusMessage.__name__})"
                self._log(timestamp, log_message)
        return statuses

    def _get_idle_node(self) -> Optional[str]:
        """
        Query the nodes for their status, pick one which status is
        'IDLE', and return it's address. If no nodes are free,
        return None.
        """
        statuses = self._get_statuses()
        for address, status in statuses.items():
            if status == 'IDLE':
                return address
        # No idle node found.  (Replaces a misleading for/else: the loop
        # never breaks, so the else clause was unconditional.)
        return None

    def _submit_job(self, job: Job, node_address: str) -> bool:
        """
        Submit a job to a node. Return True if the node responds
        with JobStartedMessage, and False otherwise.
        """
        socket = self.context.socket(zmq.REQ)
        socket.connect(f'tcp://{node_address}')
        # TODO: Handle unpickling errors
        message = self._make_message(StartJobMessage, job=job)
        socket.send_pyobj(message)
        response = socket.recv_pyobj()
        if isinstance(response, JobStartedMessage):
            timestamp = self.clock.receive_message(response.clock)
            log_message = f'Submitted {job} to {node_address}'
            self._log(timestamp, log_message)
            return True
        else:
            timestamp = self.clock.register_event()
            log_message = f'Submitting {job} to {node_address} failed\n'
            self._log(timestamp, log_message)
            return False

    def _get_number_running_jobs(self) -> int:
        """Return how many worker nodes currently report status 'BUSY'."""
        statuses = self._get_statuses()
        return sum(1 for status in statuses.values() if status == 'BUSY')

    def get_load(self) -> int:
        """Return this resource manager's load (queued job count)."""
        return self.job_queue.qsize()

    def _listen_action(self):
        # No extra per-iteration work is needed while listening.
        return

    def __repr__(self):
        return f"ResourceManager(queue_size={self.job_queue.qsize()})"
| StarcoderdataPython |
8512 | <filename>charlotte/charlotte.py
class Config:
    """Simple container holding the name of a configuration file."""

    def __init__(self, config_file_name):
        # Name (or path) of the configuration file.
        self.config_file_name = config_file_name
1754344 | from typing import Type
from iioy.core.adapters import BaseAdapter
from iioy.core.interfaces import AdapterInterface, AdapterMethod
from iioy.movies.models import Person
class PersonInterface(AdapterInterface):
    """Adapter-backed interface that fetches person data from an external
    source (identified by *external_id*) and maps it onto the local
    Person model."""

    def __init__(self, adapter_cls: Type[BaseAdapter], external_id):
        # Identifier of the person in the external data source.
        self.external_id = external_id
        super().__init__(adapter_cls)

    def get_adapter(self):
        """Instantiate the adapter bound to this person's external id."""
        return self.adapter_cls(self.external_id)

    # Each AdapterMethod delegates the corresponding call to the adapter.
    get_tmdb_id = AdapterMethod()
    get_name = AdapterMethod()
    get_profile_picture_url = AdapterMethod()
    get_biography = AdapterMethod()
    get_day_of_birth = AdapterMethod()
    get_day_of_death = AdapterMethod()
    get_homepage = AdapterMethod()
    get_birthplace = AdapterMethod()
    get_aliases = AdapterMethod()

    def save(self):
        """Create or update the Person record from the adapter's data."""
        return Person.objects.update_or_create(**self.get_person_data())

    def build(self):
        """Build an unsaved Person instance from the adapter's data."""
        return Person(**self.get_person_data())

    def get_person_data(self):
        """Collect all person fields from the adapter into one dict."""
        return dict(
            tmdb_id=self.get_tmdb_id(),
            name=self.get_name(),
            profile_picture_url=self.get_profile_picture_url(),
            biography=self.get_biography(),
            day_of_birth=self.get_day_of_birth(),
            day_of_death=self.get_day_of_death(),
            homepage=self.get_homepage(),
            birthplace=self.get_birthplace(),
            aliases=self.get_aliases(),
        )
| StarcoderdataPython |
3377026 | <filename>code/python/pymir/analytics/key_detection/musicnet/transformations/time_series_split.py
from sklearn.model_selection import train_test_split
from pymir import settings
from pymir.common import EXISTING_KEYS
import csv
import os
import pandas as pd
def generate_ds(train_fname, test_fname, test_size=0.2):
    """Split the MusicNet time-series CSV into train and test CSV files.

    Rows are grouped by their first field (the key of the song) and each
    key's rows are split separately, so every key present in the data is
    represented in both sets.  The first value of every written line is
    the key of the song.

    Args:
        train_fname: Path of the training-set CSV to write.
        test_fname: Path of the test-set CSV to write.
        test_size: Fraction of each key's rows that goes to the test set.
    """
    musicnet_fname = (
        os.path.join(settings.DATA_DIR, 'musicnet', 'representations',
                     'time_series', 'time_domain', 'musicnet.csv'))

    # Group the rows by song key (first column of each row).
    # (Removed an unused row counter from the original loop.)
    songs = {}
    with open(musicnet_fname) as f:
        reader = csv.reader(f, delimiter=' ')
        for row in reader:
            songs.setdefault(row[0], []).append(row)

    # Split the rows of every known key separately (stratified by key).
    train_list = []
    test_list = []
    for k in EXISTING_KEYS:
        if k in songs:
            df = pd.Series(songs[k])
            train, test = train_test_split(df, test_size=test_size)
            train_list.append(train)
            test_list.append(test)
    train = pd.concat(train_list)
    test = pd.concat(test_list)

    # generate train and test sets, first note in every line is key of the song
    with open(test_fname, 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=' ')
        for a in test:
            writer.writerow(a)
    with open(train_fname, 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=' ')
        for a in train:
            writer.writerow(a)
def compute(train_size=0.8):
    """Generate the MusicNet train/test split files.

    Each song in the dataset is represented by metadata and a time
    series of samples taken at a given sample rate.  *train_size* is the
    fraction of each key's rows kept for training.
    """
    base_dir = os.path.join(settings.DATA_DIR, 'musicnet', 'representations',
                            'time_series', 'time_domain')
    train_fname = os.path.join(base_dir, 'musicnet_train.csv')
    test_fname = os.path.join(base_dir, 'musicnet_test.csv')
    generate_ds(train_fname, test_fname, test_size=1 - train_size)
| StarcoderdataPython |
3220454 | """
KNX/IP notification service.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/notify.knx/
"""
import asyncio
import voluptuous as vol
from homeassistant.components.knx import DATA_KNX, ATTR_DISCOVER_DEVICES
from homeassistant.components.notify import PLATFORM_SCHEMA, \
BaseNotificationService
from homeassistant.const import CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
CONF_ADDRESS = 'address'
DEFAULT_NAME = 'KNX Notify'
DEPENDENCIES = ['knx']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
})
@asyncio.coroutine
def async_get_service(hass, config, discovery_info=None):
    """Get the KNX notification service."""
    # Discovery info is present when the platform was set up through the
    # KNX component's configuration; otherwise use the platform config.
    if discovery_info is not None:
        return async_get_service_discovery(hass, discovery_info)
    return async_get_service_config(hass, config)
@callback
def async_get_service_discovery(hass, discovery_info):
    """Set up notifications for KNX platform configured via xknx.yaml."""
    device_registry = hass.data[DATA_KNX].xknx.devices
    notification_devices = [device_registry[device_name]
                            for device_name in
                            discovery_info[ATTR_DISCOVER_DEVICES]]
    if not notification_devices:
        return None
    return KNXNotificationService(notification_devices)
@callback
def async_get_service_config(hass, config):
    """Set up notification for KNX platform configured within platform."""
    import xknx
    # Create the xknx notification device from the platform config and
    # register it so the KNX component manages it.
    notification = xknx.devices.Notification(
        hass.data[DATA_KNX].xknx,
        name=config.get(CONF_NAME),
        group_address=config.get(CONF_ADDRESS))
    hass.data[DATA_KNX].xknx.devices.add(notification)
    return KNXNotificationService([notification, ])
class KNXNotificationService(BaseNotificationService):
    """Notification service that writes messages to the KNX bus."""

    def __init__(self, devices):
        """Initialize the service with the xknx notification devices."""
        self.devices = devices

    @property
    def targets(self):
        """Return a dictionary of registered targets."""
        # Each device name maps to itself so users can address a single
        # notification device as a target.
        return {device.name: device.name for device in self.devices}

    @asyncio.coroutine
    def async_send_message(self, message="", **kwargs):
        """Send a notification to knx bus."""
        if "target" in kwargs:
            yield from self._async_send_to_device(message, kwargs["target"])
        else:
            yield from self._async_send_to_all_devices(message)

    @asyncio.coroutine
    def _async_send_to_all_devices(self, message):
        """Send a notification to knx bus to all connected devices."""
        for device in self.devices:
            yield from device.set(message)

    @asyncio.coroutine
    def _async_send_to_device(self, message, names):
        """Send a notification to knx bus to device with given names."""
        for device in self.devices:
            if device.name in names:
                yield from device.set(message)
| StarcoderdataPython |
137523 | # -*- coding: utf-8 -*-
import scrapy
class WeixinSpider(scrapy.Spider):
    """Scrapy spider stub for ershicimi.com; parsing not implemented yet."""
    name = 'weixin'
    # Restrict the crawl to this domain.
    allowed_domains = ['ershicimi.com']
    start_urls = ['http://ershicimi.com/']

    def parse(self, response):
        # TODO: response parsing is not implemented.
        pass
| StarcoderdataPython |
3384061 | <filename>trace_for_guess/filenames.py
# SPDX-FileCopyrightText: 2021 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT
import os
import re
from trace_for_guess.netcdf_metadata import (get_metadata_from_trace_file,
get_metadata_from_trace_files)
def get_cru_filenames():
    """Create list of original CRU TS 4.01 filenames.

    Covers the six decades 1921–1930 through 1971–1980 for the variables
    cloud cover (cld), precipitation (pre), wet days (wet) and
    temperature (tmp), one file per decade and variable.  (The original
    docstring claimed 1900–1990, which did not match the generated
    ranges.)
    """
    decades = [(y + 1, y + 10) for y in range(1920, 1971, 10)]
    variables = ['cld', 'pre', 'wet', 'tmp']
    # Combine every time segment (decade) with every variable.
    return ['cru_ts4.01.%d.%d.%s.dat.nc' % (y1, y2, v)
            for (y1, y2) in decades
            for v in variables]
def get_crujra_filenames():
    """Create a list of all original CRU-JRA filenames from 1958 to 1990."""
    years = range(1958, 1991)
    return ['crujra.V1.1.5d.pre.%d.365d.noc.nc' % year for year in years]
def get_modern_trace_filename(var: str):
    """Compose the name for the most recent TraCE-21ka NetCDF file."""
    return 'trace.36.400BP-1990CE.cam2.h0.' + var + '.2160101-2204012.nc'
def get_time_range_of_trace_file(filename):
    """Get the time range (years BP) only from given TraCE-21ka filename.

    Args:
        filename: The base filename (without path!) of the TraCE file. The file
            name must not have been altered!

    Returns:
        A list with two integers defining beginning and end of time range in
        years BP.

    Raises:
        ValueError: If `filename` does not match the original TraCE-21ka naming
            pattern as expected.
    """
    # For the youngest TraCE file, the time range goes to 1990 CE, which
    # translates to -40 BP.
    if 'CE' in filename:
        return [400, -40]
    match_obj = re.match(r'trace\.\d\d\.(\d+)-(\d+)BP.*', filename)
    if match_obj is None:
        # Fix: the original error message hard-coded the placeholder
        # text "(unknown)" instead of naming the offending file, and a
        # broad try/except masked the actual failure.
        raise ValueError("Given file name does not match TraCE-21ka naming "
                         f"pattern: '{filename}'")
    start = int(match_obj.group(1))
    end = int(match_obj.group(2))
    return [start, end]
def get_all_trace_filenames(variables: list):
    """Create a list of ALL original TraCE-21ka NetCDF filenames.

    Args:
        variables: List with the CAM variable names.

    Returns:
        A list of strings with the TraCE filenames, grouped by variable
        (all 36 simulation segments for the first variable, then the
        next, and so on).
    """
    result = list()
    for v in variables:
        # One entry per simulation segment (01-36); each name encodes the
        # segment's years BP and its CCSM3 output month range.
        result += ['trace.01.22000-20001BP.cam2.h0.%s.0000101-0200012.nc' % v,
                   'trace.02.20000-19001BP.cam2.h0.%s.0200101-0300012.nc' % v,
                   'trace.03.19000-18501BP.cam2.h0.%s.0300101-0350012.nc' % v,
                   'trace.04.18500-18401BP.cam2.h0.%s.0350101-0360012.nc' % v,
                   'trace.05.18400-17501BP.cam2.h0.%s.0360101-0450012.nc' % v,
                   'trace.06.17500-17001BP.cam2.h0.%s.0450101-0500012.nc' % v,
                   'trace.07.17000-16001BP.cam2.h0.%s.0500101-0600012.nc' % v,
                   'trace.08.16000-15001BP.cam2.h0.%s.0600101-0700012.nc' % v,
                   'trace.09.15000-14901BP.cam2.h0.%s.0700101-0710012.nc' % v,
                   'trace.10.14900-14351BP.cam2.h0.%s.0710101-0765012.nc' % v,
                   'trace.11.14350-13871BP.cam2.h0.%s.0765101-0813012.nc' % v,
                   'trace.12.13870-13101BP.cam2.h0.%s.0813101-0890012.nc' % v,
                   'trace.13.13100-12901BP.cam2.h0.%s.0890101-0910012.nc' % v,
                   'trace.14.12900-12501BP.cam2.h0.%s.0910101-0950012.nc' % v,
                   'trace.15.12500-12001BP.cam2.h0.%s.0950101-1000012.nc' % v,
                   'trace.16.12000-11701BP.cam2.h0.%s.1000101-1030012.nc' % v,
                   'trace.17.11700-11301BP.cam2.h0.%s.1030101-1070012.nc' % v,
                   'trace.18.11300-10801BP.cam2.h0.%s.1070101-1120012.nc' % v,
                   'trace.19.10800-10201BP.cam2.h0.%s.1120101-1180012.nc' % v,
                   'trace.20.10200-09701BP.cam2.h0.%s.1180101-1230012.nc' % v,
                   'trace.21.09700-09201BP.cam2.h0.%s.1230101-1280012.nc' % v,
                   'trace.22.09200-08701BP.cam2.h0.%s.1280101-1330012.nc' % v,
                   'trace.23.08700-08501BP.cam2.h0.%s.1330101-1350012.nc' % v,
                   'trace.24.08500-08001BP.cam2.h0.%s.1350101-1400012.nc' % v,
                   'trace.25.08000-07601BP.cam2.h0.%s.1400101-1440012.nc' % v,
                   'trace.26.07600-07201BP.cam2.h0.%s.1440101-1480012.nc' % v,
                   'trace.27.07200-06701BP.cam2.h0.%s.1480101-1530012.nc' % v,
                   'trace.28.06700-06201BP.cam2.h0.%s.1530101-1580012.nc' % v,
                   'trace.29.06200-05701BP.cam2.h0.%s.1580101-1630012.nc' % v,
                   'trace.30.05700-05001BP.cam2.h0.%s.1630101-1700012.nc' % v,
                   'trace.31.05000-04001BP.cam2.h0.%s.1700101-1800012.nc' % v,
                   'trace.32.04000-03201BP.cam2.h0.%s.1800101-1880012.nc' % v,
                   'trace.33.03200-02401BP.cam2.h0.%s.1880101-1960012.nc' % v,
                   'trace.34.02400-01401BP.cam2.h0.%s.1960101-2060012.nc' % v,
                   'trace.35.01400-00401BP.cam2.h0.%s.2060101-2160012.nc' % v,
                   'trace.36.400BP-1990CE.cam2.h0.%s.2160101-2204012.nc' % v]
    return result
def ranges_overlap(range1, range2):
    """Whether two 2-element lists have an overlap.

    Args:
        range1, range2: Each a 2-element list with numbers.

    Returns:
        True if there is overlap, False otherwise.

    Raises:
        TypeError: An argument is not a list.
        ValueError: One of the list doesn't have exactly 2 elements.
    """
    if not (isinstance(range1, list) and isinstance(range2, list)):
        raise TypeError('Both arguments must be a list.')
    if len(range1) != 2 or len(range2) != 2:
        raise ValueError('Both lists must have two elements each.')
    # They overlap iff neither range lies completely below the other.
    return min(range2) <= max(range1) and min(range1) <= max(range2)
def get_trace_filenames(variables, time_range):
    """Get the list of original TraCE-21ka filenames that cover the time range.

    Args:
        variables: A list of TraCE variables (can also be a single string).
        time_range: A list with two integers, the start and end of the time
            frame in years BP. Values must lie between 22000 BP and -40 BP
            (i.e. 1990 CE).

    Returns:
        List of file names.

    Raises:
        ValueError: If `time_range` is not valid.
    """
    if not isinstance(variables, list):
        variables = [variables]
    if min(time_range) < -40 or max(time_range) > 22000:
        raise ValueError('The given time range is invalid. The numbers must '
                         'lie between 22000 BP and -40 BP (=1990 CE). I got '
                         f'this: {time_range}')
    # Keep every file whose own time range intersects the requested one.
    return [f for f in get_all_trace_filenames(variables)
            if ranges_overlap(time_range, get_time_range_of_trace_file(f))]
def derive_new_trace_name(trace_file, var):
    """Compose a new basename for a TraCE file with already absolute calendar.

    Args:
        trace_file: An existing TraCE NetCDF file.
        var: The CCSM3 variable in the NetCDF file.

    Returns:
        String with the new base filename.

    Raises:
        FileNotFoundError: If `trace_file` does not exist.
    """
    if not os.path.isfile(trace_file):
        raise FileNotFoundError(f"Could not find TraCE file '{trace_file}'.")
    metadata = get_metadata_from_trace_file(trace_file)
    return 'trace_{:05}-{:05}_{}.nc'.format(metadata['first_year'],
                                            metadata['last_year'], var)
def derive_new_concat_trace_name(trace_filelist, var):
    """Compose a new basename for the concatenation of many TraCE files.

    Args:
        trace_filelist: List of TraCE file paths.
        var: The CCSM3 variable in the NetCDF file.

    Returns:
        String with new base filename.
    """
    metadata = get_metadata_from_trace_files(trace_filelist)
    return 'trace_{:05}-{:05}_{}.nc'.format(metadata['first_year'],
                                            metadata['last_year'], var)
def get_co2_filename(first_year, last_year):
    """Compose a basename for a CO₂ file."""
    return 'trace_{:05}-{:05}_CO2.txt'.format(first_year, last_year)
| StarcoderdataPython |
127799 | # -*- coding: utf-8 -*-
"""
Created on Mon May 14 16:50:33 2018
@author: ADay
"""
import os
import pandas as pd
import numpy as np
import requests
import time
import json
def get_earliest_date(item):
    """
    Given a crossref works record, find the earliest date.

    Looks at the 'issued', 'created', 'indexed' and 'deposited' fields
    and returns the smallest timestamp found, or NaN when the record
    contains no usable timestamp.
    """
    tags = ['issued', 'created', 'indexed', 'deposited']
    stamps = []
    for tag in tags:
        try:
            stamps.append(int(item[tag]['timestamp']))
        except (KeyError, TypeError, ValueError):
            # Field missing or malformed -- skip it.  (Was a bare
            # `except:`, which would also hide unrelated errors.)
            pass
    # min() raises ValueError on an empty sequence; be explicit instead.
    if stamps:
        return min(stamps)
    return np.nan
def json_authors_to_list(authors):
    """
    Take JSON formatted author list from CrossRef data and convert each
    author to a 'given+family' string, in the same format as the search.

    Missing name parts default to the empty string.
    """
    match_authors = []
    for author in authors:
        # dict.get with a default replaces the original bare try/except
        # blocks, which could hide unrelated errors.
        given = author.get('given', '')
        family = author.get('family', '')
        match_authors.append(given + '+' + family)
    return match_authors
def strip_newlines(s):
    """Replace newlines with spaces and drop carriage returns."""
    translation = str.maketrans({'\n': ' ', '\r': None})
    return s.translate(translation)
def get_output(ms_id,item, authors, t_sim, rank):
    """
    Given search result data, put it into a form suitable for the output spreadsheet.

    :param ms_id: manuscript ID of the rejected article being matched
    :param item: one CrossRef works item (decoded JSON dict)
    :param authors: searched author string, names formatted 'Given+Family'
        and separated by ', '
    :param t_sim: title similarity value, passed through to the output
    :param rank: rank of this item in the CrossRef result list, passed through
    :return: dict with one value per output-spreadsheet column
    """
    match_title = strip_newlines(item['title'][0])
    score = item['score']
    match_doi = item['DOI']
    # build authors list
    authors2 = item['author']
    match_authors = json_authors_to_list(authors2)
    # check author name matches here. Do we have a first initial and surname in common with the articles CR has found?
    # Each name is reduced to (first initial, surname) before comparing.
    names1 = [(name[0],name[name.rfind('+')+1:]) for name in authors.split(', ')]
    names2 = [(name[0],name[name.rfind('+')+1:]) for name in match_authors]
    match_one = any(name in names2 for name in names1)
    match_all = all(name in names2 for name in names1)
    publisher = item['publisher']
    # The fields below are optional in CrossRef records; fall back to ''
    # when missing or malformed.
    try:
        match_type = strip_newlines(item['type'])
    except:
        match_type = ''
    try:
        match_journal = strip_newlines(item['container-title'][0])
    except:
        match_journal = ''
    try:
        match_pub_date = str(item['issued']['date-parts'][0])
    except:
        match_pub_date=''
    earliest_date = get_earliest_date(item)
    cr_score = score
    cr_cites = item['is-referenced-by-count']
    return {'ms_id': ms_id,
            'match_doi': match_doi,
            'match_type': match_type,
            'match_title': match_title,
            'match_authors': match_authors,
            'publisher': publisher,
            'journal': match_journal,
            'match_pub_date': match_pub_date,
            'earliest_date': earliest_date,
            't_sim':t_sim,
            'match_one':match_one,
            'match_all': match_all,
            'cr_score':cr_score,
            'cr_cites':cr_cites,
            'rank':rank}
def search_cr(title, authors, pubdate_filter, myemail):
    """
    Searches CrossRef for matching titles.
    Requires: requests, json

    :param title: manuscript title used for the bibliographic query
    :param authors: author string; names separated by ', '
    :param pubdate_filter: CrossRef filter expression passed through as
        the 'filter' query parameter
    :param myemail: contact email sent in the request headers
    :return: list of CrossRef works items (decoded JSON dicts)
    """
    authors = authors.split(', ')
    address = "https://api.crossref.org/works/"
    # search crossref
    payload = { 'filter' : pubdate_filter,
                'query.bibliographic' : title, # formerly query.title ... see: https://status.crossref.org/incidents/4y45gj63jsp4
                'query.author' : authors,
                'rows' : 10} # you might want to change this...
    headers = {
        'User-Agent': "SAGE's rejected article tracker",
        'mailto': myemail
    }
    r = requests.get(address, params=payload, headers = headers)
    # check response time. Documentation recommends backing off
    # if response time is too long.
    response_time = r.elapsed.total_seconds()
    # uncomment this to monitor the response times
    # print(response_time, 'seconds for last request')
    # responses are generally <1s.
    # simple rule for sleeping if responses are slow
    if response_time > 2.0:
        sleep_time = int(response_time)#*2
        print('CrossRef slow to respond to last request. Sleeping for {} seconds.'.format(sleep_time))
        time.sleep(sleep_time)
    # read the json response as a dict
    rj = r.json()['message']['items']
    return rj
def convert_name(names):
    """Reshape author names for the CrossRef search.

    Each '; '-separated "Last, First" entry becomes "First+Last";
    entries are re-joined with ', '.
    """
    people = names.split('; ') if ';' in names else [names]
    converted = ['+'.join(reversed(person.split(', '))) for person in people]
    return ', '.join(converted)
def raw(s):
    """Return the base ScholarOne manuscript ID, dropping any '.R<n>' revision suffix."""
    # str.partition returns (s, '', '') when '.R' is absent, so [0] is always correct.
    return s.partition('.R')[0]
def build_input(dates):
    """Load and merge every spreadsheet in ./input into a single dataframe.

    Keeps only the ScholarOne columns of interest, drops drafts and rows
    missing title/authors, coerces the date columns, and restricts rows to
    the [dates[0], dates[1]] submission window.
    """
    wanted_cols = ['Journal Name', 'Manuscript Type', 'Manuscript ID', 'Manuscript Title', 'Author Names',
                   'Submission Date', 'Decision Date', 'Decision Type', 'Accept or Reject Final Decision']
    frames = [pd.DataFrame({})]
    for name in os.listdir('input'):
        sheet = pd.read_excel(os.path.join('input', name))
        frames.append(sheet[wanted_cols])
    df = pd.concat(frames)
    # Drop placeholder drafts and rows without a usable title/author list.
    df = df[df['Manuscript ID'] != 'draft']
    df = df.dropna(subset=['Manuscript Title', 'Author Names'])
    # TODO: also drop very short titles — would help when dealing with drafts.
    # Base manuscript ID with any '.R<n>' revision suffix removed.
    df['raw_ms_id'] = df['Manuscript ID'].map(raw)
    # Coerce date columns, then keep only the requested submission window.
    df['Submission Date'] = pd.to_datetime(df['Submission Date'], errors='coerce')
    df['Decision Date'] = pd.to_datetime(df['Decision Date'], errors='coerce')
    df = df[(df['Submission Date'] >= dates[0]) & (df['Submission Date'] <= dates[1])]
    return df
def pre_process(dates):
    """Build the tracker input: one row per manuscript that was NOT accepted.

    Deduplicates revisions (keeping the final round) and adds the text
    columns the CrossRef search expects.
    """
    df = build_input(dates)
    # Keep everything that was not accepted — this includes rejections and
    # manuscripts sent for revision, which may have been published elsewhere.
    df = df[df['Accept or Reject Final Decision'] != 'Accept']
    # One row per manuscript; keep='last' keeps the final revision round
    # (assumes titles stay constant across revisions).
    df = df.drop_duplicates(subset=['raw_ms_id'], keep='last')
    # Text submission date (YYYY-MM-DD), an input parameter for the CR search.
    df['text_sub_date'] = df['Submission Date'].astype(str).map(lambda x: str(x)[:10])
    # Author names reshaped for the CR search engine.
    df['Authors'] = df['Author Names'].map(convert_name)
    return df
| StarcoderdataPython |
3338984 | <filename>Treasuregram/main_app/views.py<gh_stars>0
# -*- coding: utf-8 -*-
"""Treasuregram View Configuration"""
from __future__ import unicode_literals
from django.shortcuts import render
from .models import Treasure
# from django.http import HttpResponse
# Create your views here.
def index(request):
    """Render the treasure listing page (localhost:8888/index)."""
    # Fetch every Treasure record and hand the queryset to the template
    # under the "treasures" context key.
    all_treasures = Treasure.objects.all()
    return render(request, "index.html", {"treasures": all_treasures})
"""
class Treasure:
" This is my Treasure Class. "
def __init__(self, name, value, material, location):
self.name = name
self.value = value
self.material = material
self.location = location
treasures = [
Treasure("Gold Nugget", 500.00, "Gold", "Curly's Creek, NM"),
Treasure("Fool's Gold", 0, "Pyrite", "Fool's Falls, CO"),
Treasure("Coffee Can", 25.00, "Aluminium", "Acme, CA")
]
""" | StarcoderdataPython |
97186 | from django.db import models
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from phonenumber_field.modelfields import PhoneNumberField
import datetime as dt
import string as str
# Create your models here.
class Profile(models.Model):
    """Per-user profile linked one-to-one to Django's built-in User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    website = models.CharField(max_length=30, blank=True)
    email = models.EmailField()
    phone_number = PhoneNumberField(max_length=10, blank=True)
    location = models.CharField(max_length=30, blank=True)
    # Lazy accessor: user.profile fetches (or creates) the matching Profile row.
    User.profile = property(lambda u: Profile.objects.get_or_create(user=u)[0])

    def __str__(self):
        return self.user.username

    # Signal hooks keep a Profile row in sync with every User row.
    @receiver(post_save, sender=User)
    def create_user_profile(sender, instance, created, **kwargs):
        if created:
            Profile.objects.create(user=instance)

    @receiver(post_save, sender=User)
    def save_user_profile(sender, instance, **kwargs):
        instance.profile.save()

    @classmethod
    def get_profile(cls, user_id):
        """Return every profile except the one belonging to ``user_id``."""
        return [profile for profile in Profile.objects.all()
                if profile.user_id != user_id]

    def generate_id():
        """Return a random 10-character alphanumeric ID.

        NOTE: declared without self/cls, so call as ``Profile.generate_id()``.
        """
        # Bug fix: ``choice`` was never imported, so calling this raised
        # NameError; import it locally. Also renamed the local ``random``,
        # which shadowed the module name.
        from random import choice
        # ``str`` is the ``string`` module here (module-level ``import string as str``).
        alphabet = str.ascii_uppercase + str.ascii_lowercase + str.digits
        return ''.join(choice(alphabet) for _ in range(10))
# end
# class Profile(models.Model):
# name = models.CharField(max_length=120)
# description = models.TextField(default='description default text')
#
# def __str__(self):
# return self.name
#
# def save_profile(self):
# self.save()
#
# @classmethod
# def get_profile(cls):
# profile = Profile.objects.all()
#
# return profile
#
# class Meta:
# ordering = ['name']
class ContactRecipient(models.Model):
    """A single "contact us" style submission: sender name, email and message."""
    full_name = models.CharField(max_length = 30)
    email = models.EmailField()
    comment = models.TextField()
| StarcoderdataPython |
1664316 | <reponame>welvin21/pysimt<gh_stars>10-100
from .metric import Metric
from .multibleu import BLEUScorer
from .sacrebleu import SACREBLEUScorer
from .meteor import METEORScorer
from .cer import CERScorer
from .wer import WERScorer
from .simnmt import AVPScorer, AVLScorer, CWMScorer
"""These metrics can be used in early stopping."""
# Generation related metrics
beam_metrics = ["BLEU", "SACREBLEU", "METEOR", "CER", "WER"]
metric_info = {
'BLEU': 'max',
'SACREBLEU': 'max',
'METEOR': 'max',
'LOSS': 'min',
'ACC': 'max',
'RECALL': 'max',
'PRECISION': 'max',
'F1': 'max',
'CER': 'min',
'WER': 'max',
# simultaneous translation
'AVP': 'min', # Average proportion (Cho and Esipova, 2016)
'AVL': 'min', # Average Lagging (Ma et al., 2019 (STACL))
'DAL': 'min', # Differentiable AL (not implemented)
'CW': 'min', # Consecutive wait (Gu et al., 2017) [Not Implemented]
}
| StarcoderdataPython |
class AliPayException(Exception):
    """Error reported by the AliPay gateway, carrying its code and message."""

    def __init__(self, code, message):
        self.__code = code
        self.__message = message

    def to_unicode(self):
        # Single canonical rendering, reused for both str() and repr().
        return "AliPayException: code:{}, message:{}".format(self.__code, self.__message)

    __str__ = to_unicode
    __repr__ = to_unicode
class AliPayValidationError(Exception):
    """AliPay validation failure; unlike AliPayException it carries no gateway code."""
    pass
| StarcoderdataPython |
3254204 | <gh_stars>1-10
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import BitlyProvider
urlpatterns = default_urlpatterns(BitlyProvider)
| StarcoderdataPython |
#!/usr/bin/env python3
"""Advent of Code 2020 day 6: count 'anyone answered' and 'everyone answered'
question letters per group, summed over all groups in input.txt."""

with open("input.txt", "r") as f:
    all_groups = [x.strip().split("\n") for x in f.read().split("\n\n")]

anyone = 0
everyone = 0

for group in all_groups:
    # Bug fix (naming): the original locals were called `all` and `any`,
    # shadowing the builtins; also `any.update(*person)` unpacked each
    # character as a separate iterable — a plain set union is equivalent.
    common = set(group[0])    # answers given by every member so far
    combined = set(group[0])  # answers given by at least one member
    for person in group[1:]:
        common &= set(person)
        combined |= set(person)
    everyone += len(common)
    anyone += len(combined)

print("part 1: ", anyone)
print("part 2: ", everyone)
3330272 | import os
import io
import hashlib
from base64 import standard_b64encode
from six.moves.urllib.request import urlopen, Request
from six.moves.urllib.error import HTTPError
from infi.pyutils.contexts import contextmanager
from infi.pypi_manager import PyPI, DistributionNotFound
from logging import getLogger
logger = getLogger()
def send_setuptools_request(repository, username, password, data):
    """POST a setuptools-style ``file_upload`` multipart form to *repository*.

    Returns True on HTTP 200, False on any other HTTP status (logged);
    re-raises non-HTTP OS errors.
    """
    # code taken from distribute 40.9.0, file ./setuptools/command/upload.py
    # changed logging and return value
    # TODO use code from twine?
    # set up the authentication
    user_pass = (username + ":" + password).encode('ascii')
    # The exact encoding of the authentication string is debated.
    # Anyway PyPI only accepts ascii for both username or password.
    auth = "Basic " + standard_b64encode(user_pass).decode('ascii')
    # Build up the MIME payload for the POST data
    boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
    sep_boundary = b'\r\n--' + boundary.encode('ascii')
    end_boundary = sep_boundary + b'--\r\n'
    body = io.BytesIO()
    for key, value in data.items():
        title = '\r\nContent-Disposition: form-data; name="%s"' % key
        # handle multiple entries for the same name
        if not isinstance(value, list):
            value = [value]
        # NOTE(review): the inner loop deliberately shadows `value`, and for a
        # list containing several tuples `title` keeps accumulating
        # `; filename=...` fragments — confirm lists never mix tuple entries.
        for value in value:
            if type(value) is tuple:
                # (filename, bytes) pair, e.g. data['content']
                title += '; filename="%s"' % value[0]
                value = value[1]
            else:
                value = str(value).encode('utf-8')
            body.write(sep_boundary)
            body.write(title.encode('utf-8'))
            body.write(b"\r\n\r\n")
            body.write(value)
    body.write(end_boundary)
    body = body.getvalue()
    logger.info("Submitting %s to %s" % (data['content'][0], repository))
    # build the Request
    headers = {
        'Content-type': 'multipart/form-data; boundary=%s' % boundary,
        'Content-length': str(len(body)),
        'Authorization': auth,
    }
    request = Request(repository, data=body,
                      headers=headers)
    # send the data
    try:
        result = urlopen(request)
        status = result.getcode()
        reason = result.msg
    except HTTPError as e:
        # HTTP-level failures are reported via the return value below.
        status = e.code
        reason = e.msg
    except OSError as e:
        # Network/socket errors are unexpected: log with traceback and re-raise.
        logger.exception("")
        raise
    if status == 200:
        return True
    else:
        logger.error('Upload failed (%s): %s' % (status, reason))
        return False
def mirror_file(repository_config, filename, package_name, package_version, metadata):
    """Upload *filename* to the repository described by *repository_config*.

    Merges *metadata* with the constant fields setuptools sends plus data
    about the file itself, then posts the form.  Returns True on success,
    False on HTTP failure (mirrors send_setuptools_request's return value).
    """
    # Read the distribution file; `with` guarantees the handle is closed
    # even if reading raises (the original open/close pair leaked on error).
    with open(filename, 'rb') as f:
        content = f.read()
    basename = os.path.basename(filename)
    data = {
        ':action': 'file_upload',
        'protocol_version': '1',
        'metadata_version': '1.0',
        'content': (basename, content),
        'md5_digest': hashlib.md5(content).hexdigest(),
        'name': package_name,
        'version': package_version,
    }
    data.update(metadata)
    repository = repository_config["repository"]
    username = repository_config.get("username", "")
    password = repository_config.get("password", "")
    # Bug fix: the upload status was previously dropped (implicit None
    # return), which broke the success aggregation in mirror_package().
    return send_setuptools_request(repository, username, password, data)
@contextmanager
def temp_urlretrieve(url, localpath):
    """Download *url* to *localpath*, yield to the caller, then delete the file."""
    import requests
    logger.info("Retrieving {}".format(url))
    response = requests.get(url)
    with open(localpath, 'wb') as out:
        out.write(response.content)
    # The file is removed even if the caller's block raises.
    try:
        yield
    finally:
        os.remove(localpath)
def mirror_release(repository_config, package_name, version, version_data, release_data):
    """Mirror one release file (sdist/bdist_egg/...) described by PyPI metadata.

    Downloads the file to a temporary local copy, uploads it via mirror_file,
    and returns mirror_file's result.
    """
    # Fields copied verbatim from the PyPI release_data() payload.
    copied_keys = ('license', 'author', 'author_email', 'home_page',
                   'platform', 'summary', 'classifiers', 'description')
    metadata = {key: version_data[key] for key in copied_keys}
    python_version = release_data['python_version']
    metadata['filetype'] = release_data['packagetype']
    metadata['pyversion'] = '' if python_version == 'source' else python_version
    metadata['comment'] = release_data['comment_text']
    with temp_urlretrieve(release_data['url'], release_data['filename']):
        return mirror_file(repository_config, release_data['filename'], package_name, version, metadata)
def get_repository_config(server_name):
    """Resolve a .pypirc repository alias to its url/username/password dict."""
    # we get a pypi repository alias but we need the url+username+password from pypirc
    # distutils does the translation, but we have to fool it a little
    # NOTE(review): relies on the private _read_pypirc() API, and distutils
    # was removed from the stdlib in Python 3.12 — confirm supported versions.
    from distutils.config import PyPIRCCommand
    from distutils.dist import Distribution
    pypirc = PyPIRCCommand(Distribution())
    pypirc.repository = server_name
    return pypirc._read_pypirc()
def mirror_package(server_name, package_name, version=None):
    """Mirror every release file of *package_name* (latest version by default).

    Raises DistributionNotFound when PyPI lists no distributions for the
    version; otherwise returns the and-fold of the per-release results.
    """
    pypi = PyPI()
    version = version or pypi.get_latest_version(package_name)
    version_data = pypi.get_release_data(package_name, version)
    release_dataset = pypi._client.release_urls(package_name, version)
    repository_config = get_repository_config(server_name)
    if not release_dataset:
        msg = "No distributions found for {} {} (maybe you should try to build from download url?)"
        raise DistributionNotFound(msg.format(package_name, version))
    final_result = True
    for release_data in release_dataset:
        outcome = mirror_release(repository_config, package_name, version, version_data, release_data)
        final_result = final_result and outcome
    return final_result
| StarcoderdataPython |
40448 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
sfftk.unittests.test_readers
This testing module should have no side-effects because it only reads.
"""
from __future__ import division, print_function
import glob
import os
import struct
import sys
import unittest
import numpy
import random_words
import __init__ as tests
import ahds
from ..readers import amreader, mapreader, modreader, segreader, stlreader, surfreader
__author__ = "<NAME>, PhD"
__email__ = "<EMAIL>, <EMAIL>"
__date__ = "2017-05-15"
__updated__ = '2018-02-14'
rw = random_words.RandomWords()
# readers
class TestReaders_amreader(unittest.TestCase):
    """Tests for the AmiraMesh (.am) reader."""
    @classmethod
    def setUpClass(cls):
        cls.am_file = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data.am')
        cls.header, cls.segments_by_stream = amreader.get_data(cls.am_file)

    def test_get_data(self):
        """Test the main entry point: get_data(...)"""
        self.assertIsInstance(self.header, ahds.header.AmiraHeader)
        self.assertIsInstance(self.segments_by_stream, numpy.ndarray)
        self.assertGreaterEqual(len(self.segments_by_stream), 1)

    def test_first_line_amiramesh(self):
        """test that it's declared as an AmiraMesh file"""
        self.assertEqual(self.header.designation.filetype, 'AmiraMesh')

    def test_first_line_binary_little_endian(self):
        """test that it is formatted as BINARY-LITTLE-ENDIAN"""
        self.assertEqual(self.header.designation.format, 'BINARY-LITTLE-ENDIAN')

    def test_first_line_version(self):
        """test that it is version 2.1"""
        self.assertEqual(self.header.designation.version, '2.1')

    def test_lattice_present(self):
        """test Lattice definition exists in definitions"""
        # assertIn gives a clearer failure message than assertTrue on `in`.
        self.assertIn('Lattice', self.header.definitions.attrs)

    def test_materials_present(self):
        """test Materials exist in parameters"""
        # Bug fix: assertIsNotNone('Materials' in ...) always passed, because
        # the `in` expression returns a bool, which is never None.
        self.assertIn('Materials', self.header.parameters.attrs)

    def test_read_hxsurface(self):
        """Test handling of AmiraMesh hxsurface files"""
        am_hxsurface_file = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data_hxsurface.am')
        header, segments_by_stream = amreader.get_data(am_hxsurface_file)
        self.assertIsInstance(header, ahds.header.AmiraHeader)
        # hxsurface .am files carry no voxel streams.
        self.assertIsNone(segments_by_stream)
class TestReaders_mapreader(unittest.TestCase):
    """Tests for the CCP4/MRC (.map) reader."""
    def setUp(self):
        self.map_file = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data.map')

    def test_get_data(self):
        """Test the main entry point: get_data(...)"""
        map_ = mapreader.get_data(self.map_file)
        self.assertIsInstance(map_, mapreader.Map)
        # Header sanity checks: dimensions, mode, start indices, intervals.
        self.assertGreater(map_._nc, 0)
        self.assertGreater(map_._nr, 0)
        self.assertGreater(map_._ns, 0)
        self.assertIn(map_._mode, range(5))
        self.assertIsInstance(map_._ncstart, int)
        self.assertIsInstance(map_._nrstart, int)
        self.assertIsInstance(map_._nsstart, int)
        self.assertGreater(map_._nx, 0)
        self.assertGreater(map_._ny, 0)
        self.assertGreater(map_._nz, 0)
        # Cell lengths and angles.
        self.assertGreater(map_._x_length, 0)
        self.assertGreater(map_._y_length, 0)
        self.assertGreater(map_._z_length, 0)
        self.assertTrue(0 < map_._alpha < 180)
        self.assertTrue(0 < map_._beta < 180)
        self.assertTrue(0 < map_._gamma < 180)
        # Axis order columns/rows/sections each map to axis 1..3.
        self.assertIn(map_._mapc, range(1, 4))
        self.assertIn(map_._mapr, range(1, 4))
        self.assertIn(map_._maps, range(1, 4))
        self.assertIsInstance(map_._amin, float)
        self.assertIsInstance(map_._amax, float)
        self.assertIsInstance(map_._amean, float)
        self.assertIn(map_._ispg, range(1, 231))
        self.assertTrue(map_._nsymbt % 80 == 0)
        self.assertIn(map_._lskflg, range(2))
        # Skew matrix and translation entries.
        self.assertIsInstance(map_._s11, float)
        self.assertIsInstance(map_._s12, float)
        self.assertIsInstance(map_._s13, float)
        self.assertIsInstance(map_._s21, float)
        self.assertIsInstance(map_._s22, float)
        self.assertIsInstance(map_._s23, float)
        self.assertIsInstance(map_._s31, float)
        self.assertIsInstance(map_._s32, float)
        self.assertIsInstance(map_._s33, float)
        self.assertIsInstance(map_._t1, float)
        self.assertIsInstance(map_._t2, float)
        self.assertIsInstance(map_._t3, float)
        self.assertEqual(map_._map, 'MAP ')
        self.assertIsInstance(map_._machst, tuple)
        self.assertGreater(map_._rms, 0)
        self.assertGreater(map_._nlabl, 0)

    def test_write(self):
        """Test write map file"""
        map_to_write = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_write_map.map')
        written_maps = glob.glob(map_to_write)
        self.assertEqual(len(written_maps), 0)
        with open(map_to_write, 'w') as f:
            map_ = mapreader.get_data(self.map_file)
            map_.write(f)
        written_maps = glob.glob(map_to_write)
        self.assertEqual(len(written_maps), 1)
        # Bug fix (py3-forward): map() is lazy on Python 3, so the cleanup
        # `map(os.remove, written_maps)` never ran; loop explicitly instead.
        for written_map in written_maps:
            os.remove(written_map)

    def test_invert(self):
        """Test invert map intensities"""
        map_ = mapreader.get_data(self.map_file, inverted=False)
        self.assertFalse(map_._inverted)
        map_.invert()
        self.assertTrue(map_._inverted)
        map_ = mapreader.get_data(self.map_file, inverted=True)
        self.assertTrue(map_._inverted)
        # check the inversion is complete and that we add a new label
        with open('rm.map', 'w') as f:
            map_.write(f)
        map__ = mapreader.get_data('rm.map')
        self.assertEqual(map__._nlabl, 2)
        os.remove('rm.map')

    def test_fix_mask(self):
        """Test fix mask for fixable mask"""
        fixable_mask = mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_fixable_mask.map'))
        self.assertFalse(fixable_mask.is_mask)
        fixable_mask.fix_mask()
        self.assertTrue(fixable_mask.is_mask)

    def test_unfixable_mask(self):
        """Test exception for unfixable mask"""
        unfixable_mask = mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_unfixable_mask.map'))
        self.assertFalse(unfixable_mask.is_mask)
        with self.assertRaises(ValueError):
            unfixable_mask.fix_mask()
        self.assertFalse(unfixable_mask.is_mask)

    def test_bad_data_fail(self):
        """Test that a corrupted file (extra data at end) raises Exception"""
        with self.assertRaises(ValueError):
            mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data1.map'))

    def test_bad_data_fail2(self):
        """Test that we can raise an exception with a malformed header"""
        with self.assertRaises(ValueError):
            mapreader.get_data(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data_corrupt_header.map'))

    def test_bad_data_fail3(self):
        """Test that we can't have too long a header"""
        with self.assertRaises(ValueError):
            # create a map file with a header larger than 1024 to see the exception
            # (renamed local `map` -> `map_`: it shadowed the builtin and is
            # inconsistent with the other methods here)
            map_ = mapreader.get_data(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data.map'))
            for i in range(map_._nlabl):
                label = getattr(map_, '_label_{}'.format(i))
            y = 11
            for j in range(1, y):
                setattr(map_, '_label_{}'.format(j), label)
            map_._nlabl = y
            # NOTE(review): 'rm.map' is not cleaned up on this failure path.
            with open('rm.map', 'w') as f:
                map_.write(f)
class TestReaders_modreader(unittest.TestCase):
    """Tests for the IMOD (.mod) reader.

    NOTE(review): written for Python 2 (dict.itervalues()); the @classmethod
    setUp below re-parses the model file before every test — probably meant
    setUpClass (cf. TestReaders_amreader); confirm before changing.
    """
    @classmethod
    def setUp(cls):
        cls.mod_file = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data.mod')
        cls.mod = modreader.get_data(cls.mod_file)
    def test_get_data(self):
        """Test the main entry point: get_data(...)"""
        # Model-level header fields and their documented IMOD value ranges.
        self.assertTrue(self.mod.isset)
        self.assertGreater(len(self.mod.objts), 0)
        self.assertGreater(self.mod.objt_count, 0)
        self.assertEqual(self.mod.version, 'V1.2')
        self.assertEqual(self.mod.name, 'IMOD-NewModel')
        self.assertGreater(self.mod.xmax, 0)
        self.assertGreater(self.mod.ymax, 0)
        self.assertGreater(self.mod.zmax, 0)
        self.assertGreaterEqual(self.mod.objsize, 1)
        self.assertIn(self.mod.drawmode, [-1, 1])
        self.assertIn(self.mod.mousemode, range(3)) # unclear what 2 is equal to INVALID VALUE
        self.assertIn(self.mod.blacklevel, range(256))
        self.assertIn(self.mod.whitelevel, range(256))
        self.assertEqual(self.mod.xoffset, 0)
        self.assertEqual(self.mod.yoffset, 0)
        self.assertEqual(self.mod.zoffset, 0)
        self.assertGreater(self.mod.xscale, 0)
        self.assertGreater(self.mod.yscale, 0)
        self.assertGreater(self.mod.zscale, 0)
        self.assertGreaterEqual(self.mod.object, 0)
        self.assertGreaterEqual(self.mod.contour, -1)
        self.assertGreaterEqual(self.mod.point, -1)
        self.assertGreaterEqual(self.mod.res, 0)
        self.assertIn(self.mod.thresh, range(256))
        self.assertGreater(self.mod.pixsize, 0)
        self.assertIn(self.mod.units, ['pm', 'Angstroms', 'nm', 'microns', 'mm', 'cm', 'm', 'pixels', 'km'])
        self.assertIsInstance(self.mod.csum, int)
        self.assertEqual(self.mod.alpha, 0)
        self.assertEqual(self.mod.beta, 0)
        self.assertEqual(self.mod.gamma, 0)
    def test_read_fail1(self):
        """Test that file missing 'IMOD' at beginning fails"""
        mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data1.mod')
        with self.assertRaises(ValueError):
            modreader.get_data(mod_fn) # missing 'IMOD' start
    def test_read_fail2(self):
        """Test that file missing 'IEOF' at end fails"""
        mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data2.mod')
        with self.assertRaises(ValueError):
            modreader.get_data(mod_fn) # missing 'IEOF' end
    def test_IMOD_pass(self):
        """Test that IMOD chunk read"""
        self.assertTrue(self.mod.isset)
    def test_OBJT_pass(self):
        """Test that OBJT chunk read"""
        for O in self.mod.objts.itervalues():
            self.assertTrue(O.isset)
    def test_CONT_pass(self):
        """Test that CONT chunk read"""
        for O in self.mod.objts.itervalues():
            for C in O.conts.itervalues():
                self.assertTrue(C.isset)
    def test_MESH_pass(self):
        """Test that MESH chunk read"""
        for O in self.mod.objts.itervalues():
            for M in O.meshes.itervalues():
                self.assertTrue(M.isset)
    def test_IMAT_pass(self):
        """Test that IMAT chunk read"""
        for O in self.mod.objts.itervalues():
            self.assertTrue(O.imat.isset)
    def test_VIEW_pass(self):
        """Test that VIEW chunk read"""
        for V in self.mod.views.itervalues():
            self.assertTrue(V.isset)
    def test_MINX_pass(self):
        """Test that MINX chunk read"""
        self.assertTrue(self.mod.minx.isset)
    def test_MEPA_pass(self):
        """Test that MEPA chunk read"""
        # MEPA is optional per object: either parsed (isset) or absent (None).
        for O in self.mod.objts.itervalues():
            try:
                self.assertTrue(O.mepa.isset)
            except AttributeError:
                self.assertEqual(O.mepa, None)
    def test_CLIP_pass(self):
        """Test that CLIP chunk read"""
        # CLIP is optional per object: either parsed (isset) or absent (None).
        for O in self.mod.objts.itervalues():
            try:
                self.assertTrue(O.clip.isset)
            except AttributeError:
                self.assertEqual(O.clip, None)
    def test_number_of_OBJT_chunks(self):
        """Test that compares declared and found OBJT chunks"""
        self.assertEqual(self.mod.objsize, len(self.mod.objts))
    def test_number_of_CONT_chunks(self):
        """Test that compares declared and found CONT chunks"""
        for O in self.mod.objts.itervalues():
            self.assertEqual(O.contsize, len(O.conts))
    def test_number_of_MESH_chunks(self):
        """Test that compares declared and found MESH chunks"""
        for O in self.mod.objts.itervalues():
            self.assertEqual(O.meshsize, len(O.meshes))
    def test_number_of_surface_objects(self):
        """Test that compares declared and found surface objects"""
        # Contours with surf != 0 belong to a surface; count must match surfsize.
        for O in self.mod.objts.itervalues():
            no_of_surfaces = 0
            for C in O.conts.itervalues():
                if C.surf != 0:
                    no_of_surfaces += 1
            self.assertEqual(O.surfsize, no_of_surfaces)
    def test_number_of_points_in_CONT_chunk(self):
        """Test that compares declared an found points in CONT chunks"""
        for O in self.mod.objts.itervalues():
            for C in O.conts.itervalues():
                self.assertEqual(C.psize, len(C.pt))
    def test_number_of_vertex_elements_in_MESH_chunk(self):
        """Test that compares declared an found vertices in MESH chunks"""
        for O in self.mod.objts.itervalues():
            for M in O.meshes.itervalues():
                self.assertEqual(M.vsize, len(M.vert))
    def test_number_of_list_elements_in_MESH_chunk(self):
        """Test that compares declared an found indices in MESH chunks"""
        for O in self.mod.objts.itervalues():
            for M in O.meshes.itervalues():
                self.assertEqual(M.lsize, len(M.list))
class TestReaders_segreader(unittest.TestCase):
    """Tests for the Segger (.seg) reader."""
    def setUp(self):
        self.seg_file = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data.seg')

    def test_get_data(self):
        """Test the main entry point: get_data(...)"""
        seg = segreader.get_data(self.seg_file)
        # (removed leftover debug `print(seg, file=sys.stderr)` — it polluted
        # test output on every run)
        self.assertIsInstance(seg, segreader.SeggerSegmentation)
        self.assertEqual(seg.map_level, 0.852)
        self.assertEqual(seg.format_version, 2)
        self.assertItemsEqual(seg.map_size, [26, 27, 30])
        self.assertEqual(seg.format, 'segger')
        # The mask is stored in reverse axis order relative to map_size.
        self.assertEqual(seg.mask.shape, (30, 27, 26))
class TestReaders_stlreader(unittest.TestCase):
    """Tests for the STL (.stl) reader (ASCII, binary and multi-solid files)."""
    def setUp(self):
        self.stl_file = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data.stl')
        self.stl_bin_file = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data_binary.stl')
        self.stl_multi_file = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data_multiple.stl')

    def test_get_data(self):
        """Test the main entry point: get_data(...)"""
        meshes = stlreader.get_data(self.stl_file)  # only one mesh here
        name, vertices, polygons = meshes[0]
        num_vertices = len(vertices)
        # Every vertex id referenced by a polygon must exist in `vertices`
        # and vice versa.
        a, b, c = zip(*polygons.values())
        vertex_ids = set(a + b + c)
        self.assertEqual(name, "{}#{}".format(os.path.basename(self.stl_file), 0))
        self.assertGreaterEqual(num_vertices, 1)
        self.assertEqual(min(vertex_ids), min(vertices.keys()))
        self.assertEqual(max(vertex_ids), max(vertices.keys()))
        self.assertEqual(sum(set(vertex_ids)), sum(vertices.keys()))
        self.assertEqual(set(vertex_ids), set(vertices.keys()))

    def test_read_binary(self):
        """Test that we can read a binary STL file"""
        meshes = stlreader.get_data(self.stl_bin_file)
        # (removed leftover debug `print(meshes[0][0], file=sys.stderr)`)
        name, vertices, polygons = meshes[0]
        self.assertEqual(name, "{}#{}".format(os.path.basename(self.stl_bin_file), 0))
        self.assertTrue(len(vertices) > 0)
        self.assertTrue(len(polygons) > 0)
        polygon_ids = list()
        for a, b, c in polygons.itervalues():
            polygon_ids += [a, b, c]
        self.assertItemsEqual(set(vertices.keys()), set(polygon_ids))

    def test_read_multiple(self):
        """Test that we can read a multi-solid STL file
        Only works for ASCII by concatenation"""
        meshes = stlreader.get_data(self.stl_multi_file)
        # NOTE(review): every mesh is asserted to carry the '#0' suffix;
        # confirm the reader really indexes all solids as 0.
        for name, vertices, polygons in meshes:
            self.assertEqual(name, "{}#{}".format(os.path.basename(self.stl_multi_file), 0))
            self.assertTrue(len(vertices) > 0)
            self.assertTrue(len(polygons) > 0)
            polygon_ids = list()
            for a, b, c in polygons.itervalues():
                polygon_ids += [a, b, c]
            self.assertItemsEqual(set(vertices.keys()), set(polygon_ids))
class TestReaders_surfreader(unittest.TestCase):
    """Tests for the Amira HyperSurface (.surf) reader."""
    @classmethod
    def setUpClass(cls):
        cls.surf_file = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_data.surf')
        cls.header, cls.segments = surfreader.get_data(cls.surf_file)  # only one mesh here
    def test_get_data(self):
        """Test the main entry point: get_data(...)"""
        # Segment with id 2 is 'medulla_r' in the test fixture.
        name = self.segments[2].name
        vertices = self.segments[2].vertices
        triangles = self.segments[2].triangles
        num_vertices = len(vertices)
        # Every vertex id referenced by a triangle must exist in `vertices`,
        # and vice versa.
        a, b, c = zip(*triangles)
        vertex_ids = set(a + b + c)
        self.assertIsInstance(self.header, ahds.header.AmiraHeader)
        self.assertIsInstance(self.segments, dict)
        self.assertEqual(name, 'medulla_r')
        self.assertGreaterEqual(num_vertices, 1)
        self.assertGreaterEqual(len(self.segments), 1)
        self.assertEqual(min(vertex_ids), min(vertices.keys()))
        self.assertEqual(max(vertex_ids), max(vertices.keys()))
        self.assertEqual(sum(set(vertex_ids)), sum(vertices.keys()))
        self.assertEqual(set(vertex_ids), set(vertices.keys()))
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3324693 | import scipy.io as sio
import numpy as np
import os
def sensing_method(method_name,specifics):
    # a function which returns a sensing method with given parameters. a sensing method is a subclass of nn.Module
    # NOTE(review): stub — always returns the integer 1, not an nn.Module;
    # confirm whether this placeholder is intentional before relying on it.
    return 1
def computInitMx(Training_labels, specifics):
    """Return (Phi, Qinit): the sensing matrix and the initialization matrix.

    Two modes:
      * universal matrix: Phi/Qinit are generated from scratch and cached as
        .npy files under ``specifics['qinit_dir']``;
      * fixed matrix: Phi is loaded from a .mat file in
        ``specifics['matrix_dir']`` and Qinit is computed (and cached as a
        .mat file) on first use.
    """
    if specifics['use_universal_matrix'] == True:
        save_path = f"{specifics['qinit_dir']}/{specifics['custom_dataset_name']}_{specifics['cs_ratio']}/"
        phi_path = os.path.join(save_path, 'Phi_input.npy')
        qinit_path = os.path.join(save_path, 'Qinit.npy')
        # `scratch` forces regeneration even when cached files exist.
        scratch = (not specifics['load_qinit_from_dir']) if 'load_qinit_from_dir' in specifics else False
        if not scratch and os.path.exists(save_path) and os.path.exists(phi_path) and os.path.exists(qinit_path):
            return np.load(phi_path), np.load(qinit_path)
        else:
            # Bug fix: plain os.makedirs(save_path) raised FileExistsError
            # whenever the cache directory already existed but the .npy files
            # were missing (or scratch was requested); exist_ok=True fixes it.
            os.makedirs(save_path, exist_ok=True)
            Phi_input, Qinit = computInitMxScratch(Training_labels, specifics)
            np.save(phi_path, Phi_input)
            np.save(qinit_path, Qinit)
            return Phi_input, Qinit
    Phi_data_Name = './%s/phi_0_%d_1089.mat' % (specifics['matrix_dir'], specifics['cs_ratio'])
    Phi_data = sio.loadmat(Phi_data_Name)
    Phi_input = Phi_data['phi']
    Qinit_Name = './%s/Initialization_Matrix_%d.mat' % (specifics['matrix_dir'], specifics['cs_ratio'])
    # Computing Initialization Matrix:
    if os.path.exists(Qinit_Name):
        Qinit_data = sio.loadmat(Qinit_Name)
        Qinit = Qinit_data['Qinit']
    else:
        # Qinit = X Y^T (Y Y^T)^-1 — the least-squares linear decoder for Y = Phi X.
        X_data = Training_labels.transpose()
        Y_data = np.dot(Phi_input, X_data)
        Y_YT = np.dot(Y_data, Y_data.transpose())
        X_YT = np.dot(X_data, Y_data.transpose())
        Qinit = np.dot(X_YT, np.linalg.inv(Y_YT))
        del X_data, Y_data, X_YT, Y_YT
        sio.savemat(Qinit_Name, {'Qinit': Qinit})
    return Phi_input, Qinit
def computInitMxScratch(Training_labels, specifics):
    """Generate (Phi, Qinit) from scratch.

    Phi is a Gaussian sensing matrix of shape (specifics['m'], specifics['n'])
    with mean 0 and std dev 1/specifics['input_width'].  Qinit is
    X Y^T (Y Y^T)^-1 with X the training samples as columns and Y = Phi X —
    the least-squares linear decoder for Y = Phi X.
    """
    # Gaussian sensing matrix; (272, 1089) in the original m/n configuration.
    Phi_input = np.random.normal(0, 1./specifics['input_width'], size=(specifics['m'], specifics['n']))
    # (removed unused mean/std_dev locals — dead diagnostics)
    X_data = Training_labels.transpose()
    Y_data = np.dot(Phi_input, X_data)
    Y_YT = np.dot(Y_data, Y_data.transpose())
    X_YT = np.dot(X_data, Y_data.transpose())
    Qinit = np.dot(X_YT, np.linalg.inv(Y_YT))
    # Free the large intermediates before returning.
    del X_data, Y_data, X_YT, Y_YT
    return Phi_input, Qinit
| StarcoderdataPython |
1741637 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
def combineJsons(jsonFile1, jsonFile2, outputFile):
    """Merge two JSON object files into *outputFile* (sorted keys, 2-space indent).

    On duplicate keys the value from *jsonFile2* wins (same as the original
    items-concatenation behaviour).  Returns True on success.
    """
    # Bug fix: `dict(dict1.items() + dict2.items())` is Python-2-only —
    # dict views don't support `+` on Python 3.  dict()+update() works on both.
    # Also use `with` so file handles are closed deterministically.
    with open(jsonFile1) as fh:
        dict1 = json.load(fh)
    with open(jsonFile2) as fh:
        dict2 = json.load(fh)
    merged = dict(dict1)
    merged.update(dict2)
    with open(outputFile, 'w') as output:
        json.dump(merged, output, indent=2, sort_keys=True)
    return True
if __name__ == '__main__':
    # Bug fix: `raise Exception,u"..."` is Python-2-only raise syntax and is a
    # SyntaxError on Python 3 (it prevented the whole module from importing).
    if len(sys.argv) < 4:
        raise Exception("3 arguments needed")
    print(combineJsons(sys.argv[1], sys.argv[2], sys.argv[3]))
| StarcoderdataPython |
121438 | import vcr
from botocore.exceptions import ClientError
from service import get_entries, handler
@vcr.use_cassette()
def test_get_entries():
    """Reads from the `test_get_entries` cassette and processes the entries.
    """
    # vcr replays the recorded HTTP responses from the cassette, so this
    # test runs without network access.
    entries = get_entries()
    assert len(entries) == 633
    print(entries[0])
    # Golden record: the fully parsed first entry of the cassette's feed.
    expected_entry = {
        "language": "English",
        "title": "Social Media Monitoring (EN578-141760/B)",
        "reference_number": "PW-$$CY-007-64441",
        "solicitation_number": "EN578-141760/B",
        "amendment_number": "002",
        "publication_date": "2014-01-20",
        "date_closing": "2021-01-29 14:00 Eastern Standard Time (EST)",
        "amendment_date": "2019-12-02",
        "publishing_status": "Active",
        "gsin": "T004KA - Social Media Monitoring",
        "region_opportunity": "",
        "region_delivery": "Alberta, British Columbia, Manitoba, National Capital Region, "
        "New Brunswick, Newfoundland and Labrador, Northwest Territories, "
        "Nova Scotia, Nunavut, Ontario, Prince Edward Island, Quebec, "
        "Saskatchewan, Yukon",
        "notice_type": "APM-NPP",
        "trade_agreement": "Canadian Free Trade Agreement (CFTA)",
        "tendering_procedure": "The bidder must supply Canadian goods and/or services",
        "competitive_procurement_strategy": "Subsequent/Follow-on Contracts",
        "non_competitive_procurement_strategy": "",
        "procurement_entity": "Public Works and Government Services Canada",
        "end_user_entity": "Public Works and Government Services Canada",
        "description": "Trade Agreement: Canadian Free Trade Agreement (CFTA) Tendering "
        "Procedures: The bidder must supply Canadian goods and/or services "
        "Competitive Procurement Strategy: Subsequent/Follow-on Contracts "
        "Comprehensive Land Claim Agreement: No Nature of Requirements: "
        "Delivery Date: Above-mentioned The Crown retains the right to negotiate "
        "with suppliers on any procurement. Documents may be submitted in either "
        "official language of Canada.",
        "access_terms_of_use": 'Procurement data carries an "Open Government Licence - Canada" '
        "that governs its use. Please refer to the section about "
        'Commercial Reproduction in the Buyandsell.gc.ca "Terms and '
        'Conditions" for more information.',
        "contact": "<NAME>, <EMAIL>, (613) 990-5858 ( ), "
        "360 Albert St./ 360, rue Albert 12th Floor / 12ième étage Ottawa Ontario K1A "
        "0S5, 360 Albert St./ 360, rue Albert 12th Floor / 12ième étage Ottawa Ontario "
        "K1A 0S5",
        "document": "https://buyandsell.gc.ca/cds/public/2014/01/17"
        "/e7a31ca7070b9aa566f700a119af2564/ABES.PROD.PW__CY.B007.E64441.EBSU000.PDF, "
        "https://buyandsell.gc.ca/cds/public/2014/01/17"
        "/26e2138a386a491d75701a9f9c29af8f/ABES.PROD.PW__CY.B007.F64441.EBSU000.PDF, "
        "https://buyandsell.gc.ca/cds/public/2019/01/31"
        "/0de45f938d13bfa165fc78980e01b6fe/ABES.PROD.PW__CY.B007.E64441.EBSU001.PDF, "
        "https://buyandsell.gc.ca/cds/public/2019/01/31"
        "/7f4b16c8e7733f78bc92fb9b3cc20e5e/ABES.PROD.PW__CY.B007.F64441.EBSU001.PDF, "
        "https://buyandsell.gc.ca/cds/public/2019/11/29"
        "/8310ce0aa690eea9983c35232b46133c/ABES.PROD.PW__CY.B007.E64441.EBSU002.PDF, "
        "https://buyandsell.gc.ca/cds/public/2019/11/29"
        "/5b02fa84f5d2c320b5e65511a19b91b6/ABES.PROD.PW__CY.B007.F64441.EBSU002.PDF",
        "attachment": "",
    }
    assert entries[0] == expected_entry
def test_handler_handles_s3_client_error(mocker):
    """Ensures any S3 client errors get handled"""
    # Make the upload step raise; handler must catch it and signal failure
    # by returning False rather than propagating the exception.
    mocker.patch("service.S3_CLIENT.put_object", side_effect=ClientError({}, "failure"))
    assert handler(None, None) is False
| StarcoderdataPython |
34930 | #!/usr/bin/env python
from setuptools import find_packages, setup
# The README doubles as the PyPI long description.
with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="tplink-wr-api",
    version="0.2.1",
    url="https://github.com/n1k0r/tplink-wr-api",
    author="n1k0r",
    author_email="<EMAIL>",
    description="API to some budget TP-Link routers",
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MIT",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "Intended Audience :: Telecommunications Industry",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Topic :: Communications",
        "Topic :: Software Development :: Libraries",
        "Topic :: System :: Networking",
    ],
    # Ship every package except the test tree.
    packages=find_packages(exclude=["tests", "tests.*"]),
    python_requires=">=3.8",
    install_requires=[
        "requests~=2.26",
    ],
)
| StarcoderdataPython |
3380167 | import repetition
| StarcoderdataPython |
1745933 | # small wrapper script for all cmake calls
# to build all C and CUDA libs
# supposed to be OS independent
import argparse
import os
from tempfile import mkdtemp
from shutil import rmtree
parser = argparse.ArgumentParser(description = 'Build C/CUDA libs with cmake and install \
                them to the correct location for the python package')
parser.add_argument('--build_dir', help = 'temp build directory',
                    default = None)
parser.add_argument('--source_dir', help = 'cmake source dir',
                    default = os.path.dirname(os.path.abspath(__file__)))
parser.add_argument('--cmake_install_prefix', help = 'cmake INSTALL_LIB_DIR - default: %(default)s',
                    default = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'pyparallelproj'))
parser.add_argument('--keep_build_dir', help = 'do not remove tempory build dir',
                    action = 'store_true')
parser.add_argument('--dry', help = 'dry run - only print cmake commands',
                    action = 'store_true')
args = parser.parse_args()

#---------------------------------------------------------------------------------------------

# Use a fresh throwaway build directory unless the caller provided one.
if args.build_dir is None:
    build_dir = mkdtemp(prefix = 'build_', dir = '.')
else:
    build_dir = args.build_dir

source_dir = args.source_dir
cmake_install_prefix = args.cmake_install_prefix
remove_build_dir = not args.keep_build_dir
dry = args.dry

#---------------------------------------------------------------------------------------------

# Windows needs the symbol-export flag and builds through the INSTALL target;
# other platforms use the plain configure/build/install triple.
if os.name == 'nt':
    cmd1 = f'cmake -B {build_dir} -DCMAKE_WINDOWS_EXPORT_ALL_SYMBOLS=TRUE -DCMAKE_INSTALL_PREFIX={cmake_install_prefix} {source_dir}'
    cmd2 = f'cmake --build {build_dir} --target INSTALL --config RELEASE'
    if dry:
        # Dry run: show the commands without executing them.
        print(cmd1,'\n')
        print(cmd2)
    else:
        os.system(cmd1)
        os.system(cmd2)
else:
    cmd1 = f'cmake -B {build_dir} -DCMAKE_INSTALL_PREFIX={cmake_install_prefix} {source_dir}'
    cmd2 = f'cmake --build {build_dir}'
    cmd3 = f'cmake --install {build_dir}'
    if dry:
        print(cmd1,'\n')
        print(cmd2,'\n')
        print(cmd3)
    else:
        os.system(cmd1)
        os.system(cmd2)
        os.system(cmd3)

# Clean up the temporary build tree unless --keep_build_dir was given.
if remove_build_dir:
    rmtree(build_dir)
else:
    print(f'Kept build directory {build_dir}')
| StarcoderdataPython |
1714957 | # Kimi language interpreter in Python 3
# <NAME>
# http://www.github.com/vakila/kimi
import special_forms as sf
from environments import Environment
from errors import *
SPECIALS = sf.special_forms()
def evaluate(expression, environment):
    """Evaluate a parsed Kimi expression in *environment* and return the result.

    *expression* is a dict with a ``'type'`` key of ``'literal'``, ``'symbol'``
    or ``'apply'``; any other type is reported as a parsing error.

    >>> evaluate(parse(tokenize("(+ 1 2)")), standard_env())
    3
    """
    kind = expression['type']
    if kind == 'literal':
        # Literals evaluate to themselves.
        return expression['value']
    if kind == 'symbol':
        # Resolve the name in the current environment.
        return environment.get(expression['value'])
    if kind != 'apply':
        complain_and_die("PARSING ERROR! Unexpected expression type: " + str(expression) + ".")
        return
    operator = expression['operator']
    # Special forms receive their arguments unevaluated.
    if operator['type'] == 'symbol' and operator['value'] in SPECIALS:
        special = SPECIALS[operator['value']]
        return special(expression['arguments'], environment)
    fn = evaluate(operator, environment)
    assert_or_throw(callable(fn), "type", 'Trying to call a non-function. Did you use parentheses correctly?')
    evaluated_args = [evaluate(arg, environment) for arg in expression['arguments']]
    return fn(*evaluated_args)
| StarcoderdataPython |
1740048 | <gh_stars>1-10
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import logging
import os
import shutil
import subprocess
from appimagebuilder.utils.finder import Finder
from .base_helper import BaseHelper
from ..environment import Environment
class GStreamer(BaseHelper):
    """Configure GStreamer environment entries for a bundled AppImage.

    Locates bundled GStreamer plugins/helpers with the Finder, points the
    GST_* environment variables at them, and pre-generates the plugin
    registry so it does not need to be rebuilt on first run.
    """

    def configure(self, env: Environment, preserve_files):
        # Each step is a no-op if the corresponding file is not bundled.
        self._set_gst_plugins_path(env)
        self._set_gst_plugins_scanner_path(env)
        self._set_ptp_helper_path(env)
        self._generate_gst_registry(env)

    def _set_gst_plugins_path(self, env):
        # The plugin directory sits next to libgstreamer-1.0.so.0.
        gst_1_lib_path = self.finder.find_one(
            "*/libgstreamer-1.0.so.0", [Finder.is_file, Finder.is_elf_shared_lib]
        )
        if gst_1_lib_path:
            gst_plugins_path = os.path.join(
                os.path.dirname(gst_1_lib_path), "gstreamer-1.0"
            )
            env.set("GST_PLUGIN_PATH", gst_plugins_path)
            env.set("GST_PLUGIN_SYSTEM_PATH", gst_plugins_path)

    def _set_gst_plugins_scanner_path(self, app_run):
        gst_plugins_scanner_path = self.finder.find_one(
            "gst-plugin-scanner", [Finder.is_file, Finder.is_executable]
        )
        if gst_plugins_scanner_path:
            # Use the bundled scanner instead of re-spawning the host one.
            app_run.set("GST_REGISTRY_REUSE_PLUGIN_SCANNER", "no")
            app_run.set("GST_PLUGIN_SCANNER", gst_plugins_scanner_path)

    def _set_ptp_helper_path(self, app_run):
        gst_ptp_helper_path = self.finder.find_one(
            "*/gst-ptp-helper", [Finder.is_file, Finder.is_executable]
        )
        if gst_ptp_helper_path:
            app_run.set("GST_PTP_HELPER", gst_ptp_helper_path)

    def _generate_gst_registry(self, env):
        # Needs gst-launch-1.0 on the build host and a plugin path set above.
        gst_launch_bin = shutil.which("gst-launch-1.0")
        if gst_launch_bin and "GST_PLUGIN_PATH" in env:
            env.set("GST_REGISTRY", env["GST_PLUGIN_PATH"] + "/registry.bin")
            gst_launch_env = self._prepare_gst_launch_env(env)
            # run gst "diagnostic" to force registry generation
            # https://gstreamer.freedesktop.org/documentation/tools/gst-launch.html?gi-language=c#diagnostic
            proc = subprocess.run(
                [gst_launch_bin, "fakesrc", "num-buffers=16", "!", "fakesink"],
                env=gst_launch_env,
            )
            if proc.returncode == 0:
                env.set("GST_REGISTRY_UPDATE", "no")
                logging.info(f"GST_REGISTRY generated at: {env['GST_REGISTRY']}")
            else:
                logging.warning(f"GST_REGISTRY generation failed!")
                # Drop the stale registry path so consumers do not load junk.
                del env["GST_REGISTRY"]
        else:
            logging.warning(
                f"gst-launch-1.0 not found! It is required to generate gstreamer registry"
            )

    def _prepare_gst_launch_env(self, env):
        # NOTE(review): this binds (does not copy) os.environ, so the GST*
        # entries written below leak into the builder's own process
        # environment — confirm that is intended.
        gst_launch_env = os.environ
        for key in env.keys():
            if key.startswith("GST"):
                gst_launch_env[key] = env[key].__str__()
        return gst_launch_env
| StarcoderdataPython |
16670 | from Tkinter import *
import ttk
import BuyBook
import BookInformationPage
import Message
class UserPage(object):
    """Top-level user screen: profile/logout header plus purchased-books body.

    Destroys whatever was previously rendered inside ``root`` and rebuilds
    the whole page from scratch.
    """

    def __init__(self, root, width=None, *args, **kwargs):
        pass  # placeholder removed below; real signature follows original

    def __init__(self, root, color, font, dbConnection, userInfo):
        # Wipe the previous page's widgets before rendering this one.
        for child in root.winfo_children():
            child.destroy()
        self.root = root
        self.color = color
        self.font = font
        self.dbConnection = dbConnection
        self.userInfo = userInfo
        # Render at 3/4 of the physical screen size.
        self.screen_width = self.root.winfo_screenwidth() * 3 / 4
        self.screen_height = self.root.winfo_screenheight() * 3 / 4
        self.gui_init()

    def gui_init(self):
        # Header strip (1/8 of the height) and main body (7/8).
        self.up_frame = Frame(
            self.root,
            cursor='hand1',
            bg=self.color,
            height=self.screen_height / 8,
            width=self.screen_width)
        self.up_frame.grid_propagate(0)
        self.up_frame.pack(side=TOP, expand=True, fill=BOTH)
        self.down_frame = Frame(
            self.root,
            cursor='hand1',
            bg=self.color,
            height=self.screen_height * 7 / 8,
            width=self.screen_width)
        self.down_frame.grid_propagate(0)
        self.down_frame.pack(side=TOP, expand=True, fill=BOTH)
        # Header: profile on the left, logout on the right; body: book list.
        self.profileFrame = ProfileFrame(self.up_frame, self.screen_width / 2,
                                         self.screen_height / 8, self.color,
                                         self.font, self.userInfo)
        self.logoutFrame = LogOutFrame(
            self.root, self.up_frame, self.screen_width / 2,
            self.screen_height / 8, self.color, self.font, self.dbConnection)
        self.booksInfoFrame = BuyedBooks(
            self.down_frame, self.screen_width, self.screen_height * 7 / 8,
            self.color, self.font, self.dbConnection, self.userInfo)
class ProfileFrame(object):
    """Left header frame showing the logged-in user's profile details."""

    def __init__(self, root, width, height, color, font, userInfo):
        self.root = root
        self.width = width
        self.height = height
        self.color = color
        self.font = font
        self.userInfo = userInfo
        self.gui_init()

    def gui_init(self):
        # Raised frame holding one centered label with the profile text.
        self.frame = Frame(
            self.root,
            cursor='hand1',
            bg=self.color,
            bd=5,
            relief=RAISED,
            width=self.width,
            height=self.height)
        self.frame.pack(expand=True, side=LEFT, fill=BOTH)
        self.frame.grid_propagate(0)
        profile_info = self.extract_profile()
        self.profileLabel = Label(
            self.frame, text=profile_info, font=self.font, bg=self.color)
        self.profileLabel.place(relx=0.5, rely=0.5, anchor='center')

    def extract_profile(self):
        # One display line per value of the userInfo mapping.
        userInfo = "\n".join(self.userInfo.values())
        return userInfo
class LogOutFrame(object):
    """Right header frame with a LogOut button that tears the app down."""

    def __init__(self, parent, root, width, height, color, font, dbConnection):
        self.root = root
        self.width = width
        self.height = height
        self.color = color
        self.font = font
        # parent is the toplevel widget destroyed on logout.
        self.parent = parent
        self.dbConnection = dbConnection
        self.gui_init()

    def gui_init(self):
        self.frame = Frame(
            self.root,
            cursor='hand1',
            bd=5,
            relief=RAISED,
            bg=self.color,
            width=self.width,
            height=self.height)
        self.frame.pack(side=LEFT, expand=True, fill=BOTH)
        self.frame.grid_propagate(0)
        self.logout_button = Button(
            self.frame, text="LogOut", font=self.font, borderwidth=5)
        self.logout_button.place(relx=0.5, rely=0.5, anchor='center')
        self.logout_button.bind("<Button-1>", self.__logOutAction)

    def __logOutAction(self, event):
        # Close the DB connection, then destroy the whole widget tree.
        self.dbConnection.close()
        for child in self.parent.winfo_children():
            child.destroy()
        self.parent.destroy()
class BuyedBooks(object):
    """Body of the user page: table of the user's purchased books.

    Shows a header label, a Treeview filled via the ``getUsersBooks``
    stored procedure, and a "Buy new book" button.  Clicking a row opens
    the book's information page.
    """

    def __init__(self, root, width, height, color, font, dbConnection,
                 userInfo):
        self.root = root
        self.width = width
        self.height = height
        self.color = color
        self.font = font
        self.dbConnection = dbConnection
        self.userInfo = userInfo
        self.gui_init()

    def gui_init(self):
        """Build the header/table/button frames and populate the table."""
        frame_up = Frame(
            self.root,
            cursor='hand1',
            bg=self.color,
            width=self.width,
            height=self.height * 1 / 12)
        frame_up.grid_propagate(0)
        frame_up.pack(side=TOP, expand=True, fill=BOTH)
        frame_middle = Frame(
            self.root,
            cursor='hand1',
            bg=self.color,
            width=self.width,
            height=self.height * 10 / 12)
        frame_middle.grid_propagate(0)
        frame_middle.pack(side=TOP, expand=True, fill=BOTH)
        frame_down = Frame(
            self.root,
            cursor='hand1',
            bg=self.color,
            width=self.width,
            height=self.height * 1 / 12)
        frame_down.grid_propagate(0)
        frame_down.pack(side=TOP, expand=True, fill=BOTH)
        self.uploadedFilesLabel = Label(
            frame_up, text="BuyedBooks", font=self.font, bg=self.color)
        self.uploadedFilesLabel.place(relx=0.5, rely=0.5, anchor='center')
        self.booksDisplay = ttk.Treeview(
            frame_middle,
            columns=('#1', '#2', '#3', '#4', '#5'),
            height=20,
            show='headings',
            padding=(1, 1, 1, 1))
        self.booksDisplay.heading('#1', text='Title')
        self.booksDisplay.heading('#2', text='Author')
        self.booksDisplay.heading('#3', text='Genre')
        self.booksDisplay.heading('#4', text='Quantity')
        self.booksDisplay.heading('#5', text='Review Score')
        # All five columns share the available width evenly.
        for column_id in ('#1', '#2', '#3', '#4', '#5'):
            self.booksDisplay.column(column_id, stretch=True,
                                     width=self.width / 5)
        self.booksDisplay.pack(side=TOP, fill=BOTH, expand=True)
        self.booksDisplayStyle = ttk.Style()
        self.booksDisplayStyle.configure(
            "Treeview", font=self.font, rowheight=50)
        self.booksDisplayStyle.configure("Treeview.Heading", font=self.font)
        # Clicking a row opens that book's information page.
        self.booksDisplay.bind("<ButtonRelease-1>", self.__bookInfo)
        self.booksDisplay.tag_configure(
            "tagBook", background="white", foreground="red", font=self.font)
        self.addNewBookButton = Button(
            frame_down, text="Buy new book", font=self.font)
        self.addNewBookButton.place(relx=0.5, rely=0.5, anchor='center')
        self.addNewBookButton.bind("<Button-1>", self.__buyNewBook)
        self.__display_availableBooks()

    def __buyNewBook(self, event):
        """Open the purchase dialog and refresh the table when it closes."""
        new_window = Toplevel(self.root)
        BuyBook.BuyBook(new_window, self.color, self.font, self.dbConnection,
                        self.userInfo)
        new_window.wait_window()
        self.__display_availableBooks()

    def __bookInfo(self, event):
        """Open the information page for the clicked book row."""
        selectedItem = self.booksDisplay.focus()
        if not selectedItem:
            # Click landed outside any row; nothing to open.
            return
        valueItem = self.booksDisplay.item(selectedItem)['values']
        new_window = Toplevel(self.root)
        BookInformationPage.BookInformation(
            new_window, self.color, self.dbConnection, valueItem[0],
            self.userInfo)
        new_window.wait_window()
        self.__display_availableBooks()

    def __display_availableBooks(self):
        """Reload the table from the ``getUsersBooks`` stored procedure."""
        for child in self.booksDisplay.get_children():
            self.booksDisplay.delete(child)
        cursor = self.dbConnection.cursor()
        args = (self.userInfo['userName'], )
        cursor.callproc('getUsersBooks', args)
        for result in cursor.stored_results():
            books = result.fetchall()
            for book in books:
                self.booksDisplay.insert(
                    '', 'end', values=book, tags='tagBook')
        cursor.close()
| StarcoderdataPython |
3348737 | <filename>extensions/aria_extension_tosca/simple_v1_0/misc.py
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aria.utils.caching import cachedmethod
from aria.utils.console import puts
from aria.utils.formatting import as_raw
from aria.parser import implements_specification
from aria.parser.presentation import (AsIsPresentation, has_fields, allow_unknown_fields,
short_form_field, primitive_field, primitive_list_field,
primitive_dict_unknown_fields, object_field,
object_list_field, object_dict_field, field_getter,
field_validator, type_validator, not_negative_validator)
from .data_types import Version
from .modeling.data_types import (get_data_type, get_data_type_value, get_property_constraints,
apply_constraint_to_value)
from .modeling.substitution_mappings import (validate_substitution_mappings_requirement,
validate_substitution_mappings_capability)
from .presentation.extensible import ExtensiblePresentation
from .presentation.field_getters import data_type_class_getter
from .presentation.field_validators import (constraint_clause_field_validator,
constraint_clause_in_range_validator,
constraint_clause_valid_values_validator,
constraint_clause_pattern_validator,
data_type_validator)
from .presentation.types import (convert_name_to_full_type_name, get_type_by_name)
@implements_specification('3.5.1', 'tosca-simple-1.0')
class Description(AsIsPresentation):
    """
    Human-readable description.

    See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
    /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
    #DEFN_ELEMENT_DESCRIPTION>`__
    """

    def __init__(self, name=None, raw=None, container=None, cls=None): # pylint: disable=unused-argument
        # NOTE(review): ``unicode`` is Python-2 only; this module targets Py2.
        super(Description, self).__init__(name, raw, container, cls=unicode)

    def _dump(self, context):
        value = as_raw(self.value)
        puts(context.style.meta_style(value))


@allow_unknown_fields
@has_fields
@implements_specification('3.9.3.2', 'tosca-simple-1.0')
class MetaData(ExtensiblePresentation):
    """
    Meta data.

    Fields are declared declaratively: each decorator supplies the getter
    and the method body is documentation only.
    """

    @primitive_field(str)
    @implements_specification('3.9.3.3', 'tosca-simple-1.0')
    def template_name(self):
        """
        This optional metadata keyname can be used to declare the name of service template as a
        single-line string value.
        """

    @primitive_field(str)
    @implements_specification('3.9.3.4', 'tosca-simple-1.0')
    def template_author(self):
        """
        This optional metadata keyname can be used to declare the author(s) of the service template
        as a single-line string value.
        """

    @field_getter(data_type_class_getter(Version, allow_null=True))
    @primitive_field(str)
    @implements_specification('3.9.3.5', 'tosca-simple-1.0')
    def template_version(self):
        """
        This optional metadata keyname can be used to declare a domain specific version of the
        service template as a single-line string value.
        """

    @primitive_dict_unknown_fields(str)
    def custom(self):
        """
        :type: dict
        """
@short_form_field('url')
@has_fields
@implements_specification('3.5.5', 'tosca-simple-1.0')
class Repository(ExtensiblePresentation):
    """
    A repository definition defines a named external repository which contains deployment and
    implementation artifacts that are referenced within the TOSCA Service Template.

    See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
    /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
    #DEFN_ELEMENT_REPOSITORY_DEF>`__
    """

    @object_field(Description)
    def description(self):
        """
        The optional description for the repository.

        :type: :class:`Description`
        """

    @primitive_field(str, required=True)
    def url(self):
        """
        The required URL or network address used to access the repository.

        :type: :obj:`basestring`
        """

    @primitive_field()
    def credential(self):
        """
        The optional Credential used to authorize access to the repository.

        :type: tosca.datatypes.Credential
        """

    @cachedmethod
    def _get_credential(self, context):
        # Coerces the raw 'credential' field into a tosca.datatypes.Credential.
        return get_data_type_value(context, self, 'credential', 'tosca.datatypes.Credential')

    def _validate(self, context):
        super(Repository, self)._validate(context)
        # Coercion reports validation issues for a malformed credential.
        self._get_credential(context)


@short_form_field('file')
@has_fields
@implements_specification('3.5.7', 'tosca-simple-1.0')
class Import(ExtensiblePresentation):
    """
    An import definition is used within a TOSCA Service Template to locate and uniquely name another
    TOSCA Service Template file which has type and template definitions to be imported (included)
    and referenced within another Service Template.

    See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
    /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
    #DEFN_ELEMENT_IMPORT_DEF>`__
    """

    @primitive_field(str, required=True)
    def file(self):
        """
        The required symbolic name for the imported file.

        :type: :obj:`basestring`
        """

    @primitive_field(str)
    def repository(self):
        """
        The optional symbolic name of the repository definition where the imported file can be found
        as a string.

        :type: :obj:`basestring`
        """

    @primitive_field(str)
    def namespace_uri(self):
        """
        The optional namespace URI to that will be applied to type definitions found within the
        imported file as a string.

        :type: :obj:`basestring`
        """

    @primitive_field(str)
    def namespace_prefix(self):
        """
        The optional namespace prefix (alias) that will be used to indicate the namespace_uri when
        forming a qualified name (i.e., qname) when referencing type definitions from the imported
        file.

        :type: :obj:`basestring`
        """
@has_fields
@implements_specification('3.5.2-1', 'tosca-simple-1.0')
class ConstraintClause(ExtensiblePresentation):
    """
    A constraint clause defines an operation along with one or more compatible values that can be
    used to define a constraint on a property or parameter's allowed values when it is defined in a
    TOSCA Service Template or one of its entities.

    See the `TOSCA Simple Profile v1.0 cos01 specification <http://docs.oasis-open.org/tosca
    /TOSCA-Simple-Profile-YAML/v1.0/cos01/TOSCA-Simple-Profile-YAML-v1.0-cos01.html
    #DEFN_ELEMENT_CONSTRAINTS_CLAUSE>`__
    """

    @field_validator(constraint_clause_field_validator)
    @primitive_field()
    def equal(self):
        """
        Constrains a property or parameter to a value equal to ('=') the value declared.
        """

    @field_validator(constraint_clause_field_validator)
    @primitive_field()
    def greater_than(self):
        """
        Constrains a property or parameter to a value greater than ('>') the value declared.
        """

    @field_validator(constraint_clause_field_validator)
    @primitive_field()
    def greater_or_equal(self):
        """
        Constrains a property or parameter to a value greater than or equal to ('>=') the value
        declared.
        """

    @field_validator(constraint_clause_field_validator)
    @primitive_field()
    def less_than(self):
        """
        Constrains a property or parameter to a value less than ('<') the value declared.
        """

    @field_validator(constraint_clause_field_validator)
    @primitive_field()
    def less_or_equal(self):
        """
        Constrains a property or parameter to a value less than or equal to ('<=') the value
        declared.
        """

    @field_validator(constraint_clause_in_range_validator)
    @primitive_list_field()
    def in_range(self):
        """
        Constrains a property or parameter to a value in range of (inclusive) the two values
        declared.

        Note: subclasses or templates of types that declare a property with the ``in_range``
        constraint MAY only further restrict the range specified by the parent type.
        """

    @field_validator(constraint_clause_valid_values_validator)
    @primitive_list_field()
    def valid_values(self):
        """
        Constrains a property or parameter to a value that is in the list of declared values.
        """

    @field_validator(not_negative_validator)
    @primitive_field(int)
    def length(self):
        """
        Constrains the property or parameter to a value of a given length.
        """

    @field_validator(not_negative_validator)
    @primitive_field(int)
    def min_length(self):
        """
        Constrains the property or parameter to a value to a minimum length.
        """

    @field_validator(not_negative_validator)
    @primitive_field(int)
    def max_length(self):
        """
        Constrains the property or parameter to a value to a maximum length.
        """

    @field_validator(constraint_clause_pattern_validator)
    @primitive_field(str)
    def pattern(self):
        """
        Constrains the property or parameter to a value that is allowed by the provided regular
        expression.

        Note: Future drafts of this specification will detail the use of regular expressions and
        reference an appropriate standardized grammar.
        """

    @cachedmethod
    def _get_type(self, context):
        # The constrained data type depends on what kind of container the
        # clause appears in; dispatch on the container's capabilities.
        if hasattr(self._container, '_get_type_for_name'):
            # NodeFilter or CapabilityFilter
            return self._container._get_type_for_name(context, self._name)
        elif hasattr(self._container, '_get_type'):
            # Properties
            return self._container._get_type(context)
        else:
            # DataType (the DataType itself is our type)
            return self._container

    def _apply_to_value(self, context, presentation, value):
        return apply_constraint_to_value(context, presentation, self, value)
@short_form_field('type')
@has_fields
class EntrySchema(ExtensiblePresentation):
    """
    ARIA NOTE: The specification does not properly explain this type, however it is implied by
    examples.
    """

    @field_validator(data_type_validator('entry schema data type'))
    @primitive_field(str, required=True)
    def type(self):
        """
        :type: :obj:`basestring`
        """

    @object_field(Description)
    def description(self):
        """
        :type: :class:`Description`
        """

    @object_list_field(ConstraintClause)
    def constraints(self):
        """
        :type: list of (str, :class:`ConstraintClause`)
        """

    @cachedmethod
    def _get_type(self, context):
        return get_data_type(context, self, 'type')

    @cachedmethod
    def _get_constraints(self, context):
        return get_property_constraints(context, self)


@short_form_field('primary')
@has_fields
class OperationImplementation(ExtensiblePresentation):
    """
    Operation implementation.
    """

    @primitive_field(str)
    def primary(self):
        """
        The optional implementation artifact name (i.e., the primary script file name within a
        TOSCA CSAR file).

        :type: :obj:`basestring`
        """

    @primitive_list_field(str)
    def dependencies(self):
        """
        The optional ordered list of one or more dependent or secondary implementation artifact name
        which are referenced by the primary implementation artifact (e.g., a library the script
        installs or a secondary script).

        :type: [:obj:`basestring`]
        """
class SubstitutionMappingsRequirement(AsIsPresentation):
    """
    Substitution mapping for requirement.

    The raw value is a two-element list: [node template name, requirement name].
    """

    @property
    @cachedmethod
    def node_template(self):
        return str(self._raw[0])

    @property
    @cachedmethod
    def requirement(self):
        return str(self._raw[1])

    def _validate(self, context):
        super(SubstitutionMappingsRequirement, self)._validate(context)
        validate_substitution_mappings_requirement(context, self)


class SubstitutionMappingsCapability(AsIsPresentation):
    """
    Substitution mapping for capability.

    The raw value is a two-element list: [node template name, capability name].
    """

    @property
    @cachedmethod
    def node_template(self):
        return str(self._raw[0])

    @property
    @cachedmethod
    def capability(self):
        return str(self._raw[1])

    def _validate(self, context):
        super(SubstitutionMappingsCapability, self)._validate(context)
        validate_substitution_mappings_capability(context, self)


@has_fields
@implements_specification('2.10', 'tosca-simple-1.0')
class SubstitutionMappings(ExtensiblePresentation):
    """
    Substitution mappings.
    """

    @field_validator(type_validator('node type', convert_name_to_full_type_name, 'node_types'))
    @primitive_field(str, required=True)
    def node_type(self):
        """
        :type: :obj:`basestring`
        """

    @object_dict_field(SubstitutionMappingsRequirement)
    def requirements(self):
        """
        :type: {:obj:`basestring`: :class:`SubstitutionMappingsRequirement`}
        """

    @object_dict_field(SubstitutionMappingsCapability)
    def capabilities(self):
        """
        :type: {:obj:`basestring`: :class:`SubstitutionMappingsCapability`}
        """

    @cachedmethod
    def _get_type(self, context):
        return get_type_by_name(context, self.node_type, 'node_types')

    def _validate(self, context):
        super(SubstitutionMappings, self)._validate(context)
        # Resolving the type reports an issue if node_type is unknown.
        self._get_type(context)

    def _dump(self, context):
        self._dump_content(context, (
            'node_type',
            'requirements',
            'capabilities'))
| StarcoderdataPython |
1732372 | import numpy as np
def count_overlaps(*grids):
    """For each grid, count the cells covered by at least two lines."""
    return tuple(np.sum(board >= 2) for board in grids)

def solve(x):
    """Advent-of-Code day 5: count overlapping vent-line cells.

    Each input line looks like ``"x1,y1 -> x2,y2"``.  Returns a pair:
    overlaps counting straight lines only, then overlaps counting all lines.
    """
    ends = np.array([[pt.split(',') for pt in row.split(' -> ')] for row in x], dtype=int)
    step = ends[:, 1] - ends[:, 0]
    direction = np.where(step >= 0, 1, -1)
    # Layer 0 holds straight (horizontal/vertical) lines, layer 1 diagonals.
    board = np.zeros((2, *np.amax(ends, axis=(0, 2)) + 1), dtype=int)
    for pair, d, s in zip(ends, step, direction):
        layer = int(np.all(d != 0))
        cells = tuple(pair[0, i] + np.arange(0, d[i] + s[i], s[i]) for i in range(2))
        board[layer][cells] += 1
    return count_overlaps(board[0], board[0] + board[1])
175566 | <gh_stars>1-10
import setuptools as st
# Minimal packaging metadata; packages are discovered automatically.
st.setup(name='tracking',
        version='0.1',
        author='<NAME>, <NAME>',
        packages=st.find_packages())
| StarcoderdataPython |
153177 | <gh_stars>0
from rest_framework import serializers
from pitanja.models import Test, Pitanje, Odgovor, OdgovorUcenika
class TestSerializer(serializers.ModelSerializer):
    """Flat serializer exposing every field of ``Test``."""
    class Meta:
        model = Test
        fields = '__all__'


class PitanjeSerializer(serializers.ModelSerializer):
    """Flat serializer exposing every field of ``Pitanje`` (question)."""
    class Meta:
        model = Pitanje
        fields = '__all__'


class OdgovorSerializer(serializers.ModelSerializer):
    """Flat serializer exposing every field of ``Odgovor`` (answer)."""
    class Meta:
        model = Odgovor
        fields = '__all__'


class KompletOdgovorSerializer(serializers.ModelSerializer):
    """Answer serializer limited to the fields shown inside a full test."""
    class Meta:
        model = Odgovor
        fields = ['id', 'redni_broj', 'tekst']


class KompletPitanjeSerializer(serializers.ModelSerializer):
    """Question serializer that nests its answers (read-only)."""
    # Reverse relation: all answers belonging to this question.
    odgovor_set = KompletOdgovorSerializer(many=True, read_only=True)
    class Meta:
        model = Pitanje
        fields = ['id', 'redni_broj', 'tip', 'tekst', 'odgovor_set']


class KompletTestSerializer(serializers.ModelSerializer):
    """Test serializer that nests its questions (and their answers)."""
    pitanje_set = KompletPitanjeSerializer(many=True, read_only=True)
    class Meta:
        model = Test
        fields = ['id', 'naziv', 'datum_vazenja', 'pitanje_set']


class OdgovorUcenikaSerializer(serializers.ModelSerializer):
    """Flat serializer exposing every field of ``OdgovorUcenika``."""
    class Meta:
        model = OdgovorUcenika
        fields = '__all__'
| StarcoderdataPython |
3317627 | """
.. _`geometry`:
"""
"""Pseudo package for convenient import of geometry classes."""
from openmdao.lib.geometry.geom_data import GeomData
from openmdao.lib.geometry.stl_group import STLGroup | StarcoderdataPython |
3349104 | <reponame>xiaohan2012/capitalization-restoration-train<gh_stars>1-10
import numpy as np
import pandas as pds
def calc(input, labels=['AL', 'IC']):
    """Compute precision/recall/F1 summaries from (match, model, ref) counts.

    Parameters:
    - input: float ndarray. Either shape (n_labels, 3) with columns
      (#match, #model, #ref), or shape (n_experiments, n_labels, 3)
      holding one such table per cross-validation fold.
    - labels: label names; accepted for call-site symmetry with
      calc_and_print but not used in the computation.

    Return:
    - Label-wise average (precision, recall, f1) rows, one per label
    - Average item accuracy (for 2D input: the micro precision)
    - micro/macro average, stacked as two (precision, recall, f1) rows
    """
    if input.ndim == 2:
        prf1 = np.zeros(input.shape, dtype=np.float64)
        # precision = #match / #model
        prf1[:, 0] = input[:, 0] / input[:, 1]
        # recall = #match / #ref
        prf1[:, 1] = input[:, 0] / input[:, 2]
        # f1 = harmonic mean of precision and recall
        prf1[:, 2] = (2 * prf1[:, 0] * prf1[:, 1] /
                      (prf1[:, 0] + prf1[:, 1]))
        label_wise = prf1

        # Micro scores: pool the raw counts over labels, then score once.
        micro_sum = input.sum(axis=0)
        micro_rec = np.mean(micro_sum[0] / micro_sum[2])
        micro_prec = np.mean(micro_sum[0] / micro_sum[1])
        micro_f1 = 2 * (micro_prec * micro_rec) / (micro_prec + micro_rec)
        m_avg = np.asarray([[micro_prec, micro_rec, micro_f1],
                            prf1.mean(axis=0)])
        # NOTE(review): this branch returns micro *precision* as the middle
        # element, while the 3D branch returns item accuracy (match/ref) --
        # confirm callers expect this asymmetry.
        return (label_wise, micro_prec, m_avg)
    else:
        # 3D input: per-fold precision/recall/f1 for each label.
        prf1 = np.zeros(input.shape, dtype=np.float64)
        n_exper, n_label, _ = input.shape
        # BUG FIX: was Python-2-only xrange(); range() runs on both 2 and 3.
        for i in range(n_exper):
            # precision
            prf1[i, :, 0] = input[i, :, 0] / input[i, :, 1]
            # recall
            prf1[i, :, 1] = input[i, :, 0] / input[i, :, 2]
            # f1
            prf1[i, :, 2] = (2 * prf1[i, :, 0] * prf1[i, :, 1] /
                             (prf1[i, :, 0] + prf1[i, :, 1]))
        # Label-wise scores averaged across folds.
        label_wise_avg = prf1.mean(axis=0)
        # Micro scores per fold: pool counts over labels within each fold.
        micro_prf1 = np.zeros((n_exper, 3))
        micro_input = input.sum(axis=1)
        micro_prf1[:, 0] = micro_input[:, 0] / micro_input[:, 1]
        micro_prf1[:, 1] = micro_input[:, 0] / micro_input[:, 2]
        micro_prf1[:, 2] = (2 * micro_prf1[:, 0] * micro_prf1[:, 1] /
                            (micro_prf1[:, 0] + micro_prf1[:, 1]))
        # Item accuracy = pooled matches / pooled references, per fold.
        avg_item_acc = np.mean(micro_input[:, 0] / micro_input[:, 2])
        m_avg = np.asarray([prf1.mean(axis=1).mean(axis=0),
                            micro_prf1.mean(axis=0)])
        return label_wise_avg, avg_item_acc, m_avg
def calc_and_print(input, labels):
    """Run calc() on *input* and print the three result tables.

    Scores are shown as percentages; the label-wise and micro/macro
    tables are rendered as pandas DataFrames.
    """
    label_wise_avg, avg_item_acc, m_avg = calc(input, labels)
    score_columns = ["precision", "recall", "f1"]

    print("Label-wise average:")
    print(pds.DataFrame(label_wise_avg * 100,
                        columns=score_columns, index=labels))
    print('')

    print("Average item accuracy:")
    print(avg_item_acc * 100)
    print('')

    print(pds.DataFrame(m_avg * 100, columns=score_columns,
                        index=['micro-average', 'macro-average']))
if __name__ == '__main__':
    # Per-fold (#match, #model, #ref) counts for the CRF classifier,
    # one (AL, IC) pair of rows per cross-validation fold.
    # Replace this array with your own cross-validation result.
    input = np.asarray([[(97578, 99672, 98268),
                         (16014, 16704, 18108)],
                        [(96629, 98651, 97323),
                         (16122, 16816, 18144)],
                        [(96874, 98869, 97558),
                         (16172, 16856, 18167)],
                        [(97035, 99096, 97743),
                         (15995, 16703, 18056)],
                        [(97788, 99692, 98499),
                         (16115, 16826, 18019)],
                        [(96769, 98776, 97506),
                         (16236, 16973, 18243)],
                        [(97720, 99764, 98383),
                         (15820, 16483, 17864)],
                        [(97653, 99824, 98349),
                         (16164, 16860, 18335)],
                        [(97259, 99228, 97922),
                         (16188, 16851, 18157)],
                        [(96936, 98997, 97710),
                         (16060, 16834, 18121)]],
                       dtype=np.float64)
    # Aggregate the folds into one (label, count) table.
    input = input.sum(axis=0)
    # BUG FIX: was a Python-2-only "print x" statement; the function form
    # below behaves identically on Python 2 and matches the print() calls
    # used elsewhere in this file.
    print(pds.DataFrame(input, columns=['#match', '#model', '#ref'],
                        index=['AL', 'IC']))
    # Aggregated counts for the rule-based classifier.
    # Replace this array with your own cross-validation result.
    input = np.asarray([[131573., 148200., 146940.],
                        [763940., 779307., 780567.]])
    calc_and_print(input, ['AL', 'IC'])
| StarcoderdataPython |
1770562 | <filename>glassnode_files_organizer.py
from os import listdir
from os.path import isfile, join
import os
import json
import re
from pprint import pprint
import pandas as pd
import os
import errno
def glassnode_files_organizer():
    """Convert Glassnode JSON downloads to CSV, delete duplicate
    downloads, then move the CSVs into category/section folders as laid
    out in files_tree.json.

    NOTE(review): every path is hard-coded to one machine; parameterise
    them before reusing this elsewhere.
    """

    def make_sure_path_exists(path):
        # Create path (and any missing parents); ignore "already exists".
        os.makedirs(path, exist_ok=True)

    mypath = "/home/locsta/Documents/Glassnode 4"
    files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    json_files_to_convert = [mypath + "/" + f for f in files if f.endswith(".json")]

    # Convert each JSON export to CSV and remove the original JSON file.
    for json_file_path in json_files_to_convert:
        with open(json_file_path) as json_file:
            data = json.load(json_file)
        df = pd.DataFrame(data)
        df.to_csv(json_file_path.replace(".json", "(8).csv"), index=False)
        os.remove(json_file_path)

    # Delete duplicate browser downloads such as "name(1).csv".
    # NOTE(review): the "(i)" test also matches the "(8).csv" files written
    # just above -- confirm this double use of the "(8)" suffix is intended.
    files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    for i in range(10):
        for f in files:
            if f"({i})" in f:
                os.remove(mypath + "/" + f)

    files = [f for f in listdir(mypath) if isfile(join(mypath, f))]

    # files_tree.json maps category -> section -> ["<tier> - <Name>", ...].
    with open('/home/locsta/Documents/Glassnode 4 (jsons)/files_tree.json') as json_file:
        data = json.load(json_file)

    for category in data:
        for section in data[category]:
            for name in data[category][section]:
                try:
                    tier = name.split(" - ")[0]
                    # "Some Name" -> "some-name", matching the CSV filenames.
                    formatted_name = name.split(" - ")[1].lower().replace(" ", "-")
                except IndexError:
                    # Entry does not follow the "<tier> - <name>" pattern.
                    continue
                csv_name = formatted_name + ".csv"
                if csv_name not in files:
                    continue
                print(category)
                print(section, f"- ({tier})")
                dest_dir = f"/home/locsta/Documents/GlassNodeStudio/{category}/{section}"
                make_sure_path_exists(dest_dir)
                try:
                    # BUG FIX: the original passed the literal placeholder
                    # paths "path/to/current/file.foo" to os.rename, so no
                    # file was ever moved (the error was swallowed by a
                    # bare except). Move the matched CSV into dest_dir.
                    os.rename(join(mypath, csv_name), join(dest_dir, csv_name))
                except OSError:
                    # Best-effort: skip files that cannot be moved.
                    pass
if __name__ == '__main__':
    # Run the organizer only when this file is executed as a script,
    # not when it is imported as a module.
    glassnode_files_organizer()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.