id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
9699454 | <reponame>AdamShechter9/aws-cloudtrail-athena-script<gh_stars>1-10
#!/bin/python3
"""
At1
<NAME>
Python script to generate all resources needed to start CloudTrail logging on AWS
Steps:
1. Creating S3 Bucket
2. Attaching S3 Bucket Policy
3. Creating new CloudWatch Log Group
4. Creating new IAM Role for Cloudtrail
5. Creating CloudTrail Trail
6. Running SQL commands in Athena to set up Database and Table
# Copyright 2019 At1 LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the Software),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import boto3
import sys
import os
import time
import string
import random
import logging
# Initialize logger object
def initialize_logger(output_dir):
    """Configure the root logger with a console handler (INFO and above)
    and a file handler writing ERROR-level records to <output_dir>/error.log.

    Args:
        output_dir: Directory in which ``error.log`` is created.

    Returns:
        The configured root ``logging.Logger``.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter("%(levelname)s - %(message)s")

    # Console handler: INFO and above.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

    # Error file handler.  BUG FIX: ``delay`` takes a bool; the original
    # passed the string "true" (truthy, so it happened to work, but it is
    # the wrong type).  delay=True defers creating error.log until the
    # first error is actually logged.
    file_handler = logging.FileHandler(
        os.path.join(output_dir, "error.log"), "w", encoding=None, delay=True)
    file_handler.setLevel(logging.ERROR)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger
# Create S3 bucket for CloudTrail logging.
def s3_create_bucket():
    """Create the CloudTrail S3 bucket in the configured region.

    us-east-1 must be created without a LocationConstraint; every other
    region requires one.  Failures are logged but are not fatal.
    """
    create_kwargs = {'Bucket': s3bucket_name}
    if region_name != 'us-east-1':
        create_kwargs['CreateBucketConfiguration'] = {
            'LocationConstraint': region_name
        }
    try:
        response = s3_client.create_bucket(**create_kwargs)
        logger.info(response)
    except Exception as e:
        print(e)
        logger.error(e)
    return
# Load s3 bucket policy from file and update with parameters
# Apply new Bucket Policy
def s3_bucket_policy():
    """Render the bucket-policy template with the bucket name and account
    id, then attach the result to the CloudTrail bucket.

    Exits the process on any failure.
    """
    try:
        with open("s3_bucket_policy.json", "r") as policy_file:
            policy_template = policy_file.read()
    except Exception as e:
        print(e)
        logger.error(e)
        sys.exit(1)
    rendered_policy = (policy_template
                       .replace("{{BucketName}}", s3bucket_name)
                       .replace("{{AccountID}}", account_id))
    logger.info(rendered_policy)
    try:
        response = s3_client.put_bucket_policy(
            Bucket=s3bucket_name,
            Policy=rendered_policy,
        )
    except Exception as e:
        print(e)
        logger.error(e)
        sys.exit(1)
    logger.info(response)
    return
# Create a new log cloudwatch log group for CloudTrail
# Get CloudWatch Log Group ARN and return
def logs_create_log_group():
    """Create the CloudWatch Logs log group used by CloudTrail and return
    the ARN reported for it.  Exits the process on any API failure."""
    try:
        create_response = logs_client.create_log_group(
            logGroupName=cloudwatch_log_group_name,
        )
    except Exception as e:
        print(e)
        logger.error(e)
        sys.exit(1)
    logger.info(create_response)
    # create_log_group does not return the ARN, so look the group up again.
    try:
        describe_response = logs_client.describe_log_groups(
            logGroupNamePrefix=cloudwatch_log_group_name,
        )
    except Exception as e:
        print(e)
        logger.error(e)
        sys.exit(1)
    logger.info(describe_response)
    return describe_response['logGroups'][0]['arn']
# Create IAM role and policy for cloudtrail
# Return ARN for IAM role
def create_role_cloudtrail():
    """Create the IAM role CloudTrail uses to deliver logs to CloudWatch,
    attach an inline policy to it, and return the role ARN.

    Exits the process on any failure.
    """
    # Load the trust policy allowing cloudtrail.amazonaws.com to assume the role.
    try:
        with open("cloudtrail_assume_role.json", "r") as f:
            assume_role_policy = f.read()
    except Exception as e:
        print(e)
        logger.error(e)
        sys.exit(1)
    try:
        response = iam_client.create_role(
            RoleName=cloudtrail_role_name,
            AssumeRolePolicyDocument=assume_role_policy,
            # NOTE(review): "Cloutrail" typo exists in the original string;
            # left byte-identical because it is sent to AWS as-is.
            Description='Automated Role for Cloutrail log delivery to Cloudwatch',
        )
    except Exception as e:
        print(e)
        logger.error(e)
        sys.exit(1)
    logger.info(response)
    role_arn = response['Role']['Arn']
    # Load the inline policy template and substitute log group, region and account.
    try:
        with open("cloudwatch_role_policy.json", "r") as f:
            role_policy_raw = f.read()
    except Exception as e:
        print(e)
        logger.error(e)
        sys.exit(1)
    role_policy = role_policy_raw.replace("{{log_group_name}}", cloudwatch_log_group_name).replace("{{region}}", region_name).replace("{{accountID}}", account_id)
    logger.info(role_policy)
    # Brief pause so the newly created role is visible to the next IAM call.
    time.sleep(5)
    logger.info("creating policy and applying to role")
    try:
        response = iam_client.put_role_policy(
            RoleName=cloudtrail_role_name,
            PolicyName='create_put_logs_cloudtrail',
            PolicyDocument=role_policy
        )
    except Exception as e:
        print(e)
        logger.error(e)
        sys.exit(1)
    logger.info(response)
    return role_arn
# Create a new CloudTrail trail and start logging
def create_trail():
    """Create the CloudTrail trail wired to the S3 bucket, CloudWatch log
    group and IAM role, then start logging on it.

    Exits the process if trail creation fails; a start_logging failure is
    only printed/logged.
    """
    # KMS encryption currently disabled
    try:
        response = cloudtrail_client.create_trail(
            Name=trail_name,
            S3BucketName=s3bucket_name,
            IncludeGlobalServiceEvents=True,
            IsMultiRegionTrail=False,
            EnableLogFileValidation=True,
            CloudWatchLogsLogGroupArn=cloudwatch_log_group_arn,
            CloudWatchLogsRoleArn=cloudtrail_role_arn,
            # KmsKeyId='string',
            IsOrganizationTrail=False
        )
    except Exception as e:
        print(e)
        logger.error(e)
        sys.exit(1)
    logger.info(response)
    # Give the new trail a moment to materialize before toggling logging on.
    time.sleep(5)
    logger.info("Starting logging on CloudTrail Trail")
    try:
        response = cloudtrail_client.start_logging(
            Name=response['TrailARN']
        )
    except Exception as e:
        print(e)
        logger.error(e)
    return
# Set up Athena with a new Database (if it doesn't exist)
# generate the SQL schema for CloudTrail in Athena and apply
def athena_set_up():
    """Create the Athena query-results bucket, the CloudTrail database and
    the CloudTrail table, then fetch the table-creation query results.

    Exits the process if reading the SQL template or either query
    submission fails; result-fetch failures are only logged.
    """
    try:
        with open("athena_sql_create_table.txt", "r") as f:
            athena_sql_code_raw = f.read()
    except Exception as e:
        print(e)
        logger.error(e)
        sys.exit(1)
    athena_sql_code = (athena_sql_code_raw
                       .replace("{{db_name}}", db_name)
                       .replace("{{bucket_name}}", s3bucket_name)
                       .replace("{{account_id}}", account_id))
    logger.info(athena_sql_code)
    athena_bucket_name = "aws-athena-query-results-" + account_id + "-" + region_name
    # BUG FIX: the original called s3_create_bucket(), which creates the
    # CloudTrail bucket (module-level s3bucket_name) a second time and never
    # creates the Athena query-results bucket.  Create the results bucket
    # explicitly here instead (us-east-1 takes no LocationConstraint).
    athena_bucket_kwargs = {'Bucket': athena_bucket_name}
    if region_name != 'us-east-1':
        athena_bucket_kwargs['CreateBucketConfiguration'] = {
            'LocationConstraint': region_name
        }
    try:
        response = s3_client.create_bucket(**athena_bucket_kwargs)
        logger.info(response)
    except Exception as e:
        print(e)
        logger.error(e)
    time.sleep(5)
    output_location = "s3://" + athena_bucket_name
    logger.info("Creating Database")
    try:
        response = athena_client.start_query_execution(
            QueryString="CREATE DATABASE IF NOT EXISTS {{db_name}};".replace("{{db_name}}", db_name),
            ResultConfiguration={
                'OutputLocation': output_location
            }
        )
        logger.info(response)
    except Exception as e:
        print(e)
        logger.error(e)
        sys.exit(1)
    time.sleep(5)
    logger.info("Generating CloudTrail Table!")
    try:
        response = athena_client.start_query_execution(
            QueryString=athena_sql_code,
            ResultConfiguration={
                'OutputLocation': output_location
            }
        )
        logger.info(response)
    except Exception as e:
        print(e)
        logger.error(e)
        sys.exit(1)
    query_execution_id = response['QueryExecutionId']
    logger.info("Waiting for Athena Execution results")
    time.sleep(10)
    try:
        response = athena_client.get_query_results(
            QueryExecutionId=query_execution_id,
        )
        logger.info(response)
    except Exception as e:
        print(e)
        logger.error(e)
    return
# return a random 6 character string for application name
def randomstring():
    """Return a random 6-character string of lowercase letters and digits,
    used to give the generated AWS resources unique names."""
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choices(alphabet, k=6))
if __name__ == '__main__':
    # Usage: script.py <profile_name> <account_id> <region_name>
    args = sys.argv[1:]
    if not args:
        print("This script auto-generates regional CloudTrail logs and makes them available in Athena for querying\nusage: [profile_name] [account] [region_name]")
        sys.exit(1)
    else:
        profile_name = args[0]
        account_id = args[1]
        region_name = args[2]
    logger = initialize_logger('./')
    # Build all AWS service clients from the named-profile session.
    try:
        session = boto3.Session(profile_name=profile_name)
        # Any clients created from this session will use credentials
        ec2_client = session.client('ec2', region_name=region_name)
        cloudtrail_client = session.client('cloudtrail', region_name=region_name)
        s3_client = session.client('s3', region_name=region_name)
        sts_client = session.client('sts', region_name=region_name)
        logs_client = session.client('logs', region_name=region_name)
        iam_client = session.client('iam')
        # region_name is the second positional parameter of Session.client.
        athena_client = session.client('athena', region_name)
    except Exception as e:
        print(e)
        logger.error(e)
        raise Exception("Error with AWS credentials")
    # All generated resources share one random suffix so reruns don't collide.
    app_name = "-app-" + randomstring()
    trail_name = "cloudtrail-" + region_name + app_name
    s3bucket_name = "cloudtrail-" + region_name + app_name
    cloudwatch_log_group_name = "cloudtrail-log-" + region_name + app_name
    cloudtrail_role_name = "cloudtrail-put-get-role" + app_name
    db_name = "cloudtrail_db" + app_name
    logger.info("1. Creating S3 Bucket")
    s3_create_bucket()
    logger.info("2. Attaching S3 Bucket Policy")
    s3_bucket_policy()
    logger.info("3. Creating new CloudWatch Log Group")
    cloudwatch_log_group_arn = logs_create_log_group()
    logger.info("4. Creating new IAM Role for Cloudtrail")
    cloudtrail_role_arn = create_role_cloudtrail()
    # Wait out IAM role propagation before CloudTrail tries to assume it.
    time.sleep(20)
    logger.info("5. Creating CloudTrail Trail")
    create_trail()
    logger.info("6. Running SQL commands in Athena to set up Database and Table")
    athena_set_up()
    logger.info("All done!\nCloudtrail resources created.")
| StarcoderdataPython |
1821253 | <reponame>sleyzerzon/soar
import itertools
from PddlState import PddlState, iterator_is_empty
from PddlStateSmlAdapter import PddlStateSmlAdapter
class blocks_world:
    """PDDL-style blocks-world domain.

    State is held as three predicate sets: on__table(x), on(x, y) and
    clear(x).  Each do_action_* method checks its preconditions against the
    current state and returns a (predicate_adds, predicate_dels) pair of
    tuple lists, or an error string when the command is invalid.
    """

    def __init__(self, agent):
        # With an SML agent, mirror state into Soar via the adapter;
        # otherwise keep a plain in-memory PddlState.
        if agent:
            self.state = PddlStateSmlAdapter(self, agent)
        else:
            self.state = PddlState()
        self.state.predicates['on__table'] = set()
        self.state.predicates['on'] = set()
        self.state.predicates['clear'] = set()
        # Parameter names for each predicate, keyed by predicate name.
        self.pred_param_names = {}
        self.pred_param_names['on__table'] = ('x',)
        self.pred_param_names['on'] = ('x','y',)
        self.pred_param_names['clear'] = ('x',)

    def do_action_MoveToTable(self, args):
        """Move block `omf` from atop `lower` onto the table."""
        try:
            _omf = args['omf']
            _lower = args['lower']
        except KeyError:
            return 'Command missing parameters'
        # Preconditions: clear(omf) and on(omf, lower).
        if not ((((_omf,) in self.state.predicates['clear']) and ((_omf,_lower,) in self.state.predicates['on']))):
            return 'Preconditions not satisfied'
        predicate_adds = []
        predicate_dels = []
        predicate_adds.append(('clear', _lower))
        predicate_adds.append(('on__table', _omf))
        predicate_dels.append(('on', _omf,_lower))
        return (predicate_adds, predicate_dels)

    def do_action_MoveToBlock1(self, args):
        """Move block `omf` from atop `lower` onto block `dest`."""
        try:
            _omf = args['omf']
            _lower = args['lower']
            _dest = args['dest']
        except KeyError:
            return 'Command missing parameters'
        # Preconditions: clear(omf), clear(dest) and on(omf, lower).
        if not ((((_omf,) in self.state.predicates['clear']) and ((_dest,) in self.state.predicates['clear']) and ((_omf,_lower,) in self.state.predicates['on']))):
            return 'Preconditions not satisfied'
        predicate_adds = []
        predicate_dels = []
        predicate_adds.append(('clear', _lower))
        predicate_adds.append(('on', _omf,_dest))
        predicate_dels.append(('clear', _dest))
        predicate_dels.append(('on', _omf,_lower))
        return (predicate_adds, predicate_dels)

    def do_action_MoveToBlock2(self, args):
        """Move block `omf` from the table onto block `dest`."""
        try:
            _omf = args['omf']
            _dest = args['dest']
        except KeyError:
            return 'Command missing parameters'
        # Preconditions: clear(omf), clear(dest) and on__table(omf).
        if not ((((_omf,) in self.state.predicates['clear']) and ((_dest,) in self.state.predicates['clear']) and ((_omf,) in self.state.predicates['on__table']))):
            return 'Preconditions not satisfied'
        predicate_adds = []
        predicate_dels = []
        predicate_adds.append(('on', _omf,_dest))
        predicate_dels.append(('clear', _dest))
        predicate_dels.append(('on__table', _omf))
        return (predicate_adds, predicate_dels)
1814452 | <reponame>jerloo/pyidenticon
import unittest
import os
from pyidenticon import make
class MyTestCase(unittest.TestCase):
    """Smoke tests for pyidenticon.make; generated images are written under ./data."""

    def setUp(self):
        # Ensure the output directory exists before each test runs.
        if not os.path.exists('data'):
            os.mkdir('data')

    def test_basic_make(self):
        # Default colors.
        img = make('basic')
        img.save('data/basic.png')
        img.close()

    def test_named_color_make(self):
        # Foreground given as a named color string.
        img = make('named_fore_color.blue', fore_color='blue')
        img.save('data/named_for_color.blue.png')
        img.close()

    def test_arbg_color_make(self):
        # Foreground given as a 4-component tuple.
        img = make('arbb_fore_color.125.200.136', fore_color=(150, 125, 200, 136))
        img.save('data/arbb_color.125.200.136.png')
        img.close()

    def test_many_make(self):
        # Generate a batch of identicons with fixed colors.
        for index in range(100):
            item = 'many_index_{}.png'.format(index)
            img = make(item, fore_color=(3, 101, 100), bg_color='grey')
            img.save('data/{}'.format(item))
            img.close()

    # def tearDown(self):
    #     os.rmdir('data')
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
8103895 | import json
import os
import pickle
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
# from semantic.domain_models import sempryv_models as database
class ThryvePulsoTrainer(object):
    """Trains a Naive Bayes classifier mapping Thryve/Pulso stream
    descriptions to SNOMED-CT code-combination labels, using the bundled
    synthetic annotation files as the training corpus."""

    def __init__(self):
        self.train_data_synthetic = []
        self.target_data_synthetic = []
        # Maps ontology acronyms found in annotation URLs to canonical names.
        self.ontology_names = {"SNOMEDCT": "snomed-ct", "LOINC": "loinc", "LNC": "loinc"}
        self.thryve_streams, self.thryve_annotated_data, self.pulso_streams, self.pulso_annotated_data = \
            self.load_synthetic_data()
        # code-combination string -> integer class label
        self.code_labels = {}
        self.create_train_target_data()
        self.count_vect = CountVectorizer()
        # self.db = database

    @staticmethod
    def load_synthetic_data():
        """Load the four synthetic JSON fixtures (streams + annotations).

        NOTE(review): the two pulso file handles are never closed.
        """
        # TODO: load data from DB
        f_thryve = open('semantic/synthetics/thryve-streams.json', 'r')
        f_thryve_annotations = open('semantic/synthetics/data_annotated-thryve-filtered.json', 'r')
        f_pulso = open('semantic/synthetics/pulso-streams.json', 'r')
        f_pulso_annotations = open('semantic/synthetics/data_annotated-pulso-filtered.json', 'r')
        thryve_streams = json.load(f_thryve)
        thryve_annotated_data = json.load(f_thryve_annotations)
        pulso_streams = json.load(f_pulso)
        pulso_annotated_data = json.load(f_pulso_annotations)
        f_thryve.close()
        f_thryve_annotations.close()
        return thryve_streams, thryve_annotated_data, pulso_streams, pulso_annotated_data

    def create_train_target_data(self):
        """Build parallel train/target lists from both annotated corpora.

        For each annotated stream, loinc-coded annotations are skipped and
        the remaining codes are concatenated into a label key; streams with
        no usable codes are dropped.
        """
        # thryve
        d_stream_types = self.get_name_types(self.thryve_streams)
        for ann_dato in self.thryve_annotated_data:
            # NOTE(review): `type` and `id` shadow builtins; kept unchanged.
            type = d_stream_types[ann_dato['id']]
            name = ann_dato['name']
            id = ann_dato['id']
            annotations = ann_dato['codes']
            codes = ''
            for code in annotations:
                # Fold each annotation's preferred label into the text feature.
                alter_name = code['prefLabel']
                name += ' ' + alter_name
                annotation = code['id'].split('/')[-1]
                ontology_name = self.ontology_names[code['id'].split('/')[-2]]
                if ontology_name == 'loinc':
                    continue
                codes += ontology_name + ':' + annotation + '_'
            if codes == '':
                continue
            label = self.assign_codes_label(codes)
            self.train_data_synthetic.append(id + ' ' + name + ' ' + type)
            self.target_data_synthetic.append(label)
        # pulso (same processing as thryve above)
        d_stream_types = self.get_name_types(self.pulso_streams)
        for ann_dato in self.pulso_annotated_data:
            type = d_stream_types[ann_dato['id']]
            name = ann_dato['name']
            id = ann_dato['id']
            annotations = ann_dato['codes']
            codes = ''
            for code in annotations:
                alter_name = code['prefLabel']
                name += ' ' + alter_name
                annotation = code['id'].split('/')[-1]
                ontology_name = self.ontology_names[code['id'].split('/')[-2]]
                if ontology_name == 'loinc':
                    continue
                codes += ontology_name + ':' + annotation + '_'
            if codes == '':
                continue
            label = self.assign_codes_label(codes)
            # self.train_data_synthetic.append(name + ' ' + type)
            self.train_data_synthetic.append(id + ' ' + name + ' ' + type)
            self.target_data_synthetic.append(label)
        self.save_dict_to_file(self.code_labels, 'code_labels_synth.dict')  # TODO: persist in db

    @staticmethod
    def save_dict_to_file(dict: {}, filename: str):
        """Pickle a dict to disk.

        NOTE(review): parameter `dict` shadows the builtin and its `{}`
        annotation is a value, not a type; kept unchanged here.
        """
        f = open(filename, 'wb')
        pickle.dump(dict, f)
        f.close()

    @staticmethod
    def get_name_types(streams: []) -> dict:
        """Return a mapping of stream id -> first 'sempryv:codes' key found
        in the stream's clientData; None entries are skipped."""
        dict_types = {}
        for stream in streams:
            if stream is None:
                continue
            client_data = stream['clientData']
            dict_types[stream['id']] = list(client_data['sempryv:codes'].keys())[0]
        return dict_types

    def assign_codes_label(self, codes):
        """Return the integer label for a code combination, allocating the
        next sequential label on first sight."""
        if codes in self.code_labels:
            return self.code_labels[codes]
        new_label = len(self.code_labels)
        self.code_labels[codes] = new_label
        return new_label

    def train_data(self):
        """Vectorize the training corpus, fit a MultinomialNB classifier,
        and persist both the vectorizer and the model to disk."""
        counts = self.count_vect.fit_transform(self.train_data_synthetic)
        self.save_model_to_file(self.count_vect, filename='file_vect_synth.joblib')
        classifier_model = MultinomialNB().fit(counts, self.target_data_synthetic)
        self.save_model_to_file(classifier_model, filename='synthetics_model.joblib')

    def persist_model(self, model):
        """Pickle the model to disk, then (attempt to) persist it to the DB.

        NOTE(review): the DB half of this method is broken/dead code:
        `self.db` is never assigned (the `self.db = database` line in
        __init__ is commented out), and `sibyl_database` / `url_ids` are
        undefined names -- calling this method raises.  Confirm the intent
        with the author before repairing or deleting.
        """
        filename = 'semantic/synthetics/synthetics_model.pk'
        # os.remove(filename)
        file = open(filename, 'wb')
        pickle.dump(model, file)
        # dump(model, filename)
        file.close()
        # persist in DB:
        table = self.db.ModelsTable
        conn = self.db.engine.connect()
        conn.execute('insert into ' + table.__tablename__ + '')
        rows = self.db.session.query(table.url_id, model.price_with_vat, sibyl_database.DaredevilShopPage.shop_id,
                                     model.last_spider_counter, model.category) \
            .filter(model.url_id.in_([u['url_id'] for u in url_ids])) \
            .filter(sibyl_database.DaredevilShopPage.id == model.url_id) \
            .all()
        self.db.session.remove()

    @staticmethod
    def save_model_to_file(model, filename):
        """Pickle `model` to `filename`, replacing any existing file."""
        if os.path.exists(path=filename):
            os.remove(filename)
        file = open(filename, 'wb')
        pickle.dump(model, file)
        # dump(model, filename)
        file.close()
| StarcoderdataPython |
65388 | <reponame>nishantkr18/PettingZoo<filename>pettingzoo/butterfly/pistonball/pistonball.py
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = 'hide'
import pygame
import pymunk
import pymunk.pygame_util
import math
import numpy as np
import gym
from gym.utils import seeding
from pettingzoo import AECEnv
from pettingzoo.utils import agent_selector
from .manual_control import manual_control
from pettingzoo.utils import wrappers
from gym.utils import EzPickle
from pettingzoo.utils.to_parallel import parallel_wrapper_fn
_image_library = {}


def get_image(path):
    """Load (and memoize) an image that lives next to this module.

    Returns the pygame surface for ``path`` relative to this file's
    directory.  BUG FIX: ``_image_library`` was declared but never used, so
    every call re-read the sprite from disk; loaded surfaces are now cached.
    """
    from os import path as os_path
    cwd = os_path.dirname(__file__)
    full_path = cwd + '/' + path
    image = _image_library.get(full_path)
    if image is None:
        image = pygame.image.load(full_path)
        _image_library[full_path] = image
    return image
def env(**kwargs):
    """Build the wrapped AEC pistonball environment.

    Continuous-action envs get out-of-bounds clipping; discrete ones get an
    assertion wrapper.  Both are order-enforced.
    """
    base_env = raw_env(**kwargs)
    if base_env.continuous:
        wrapped = wrappers.ClipOutOfBoundsWrapper(base_env)
    else:
        wrapped = wrappers.AssertOutOfBoundsWrapper(base_env)
    return wrappers.OrderEnforcingWrapper(wrapped)
parallel_env = parallel_wrapper_fn(env)
class raw_env(AECEnv, EzPickle):
    """Pistonball: cooperative multi-agent environment in which a row of
    pistons must move a ball to the left wall.  Physics via pymunk,
    rendering/observations via pygame surfaces."""

    metadata = {'render.modes': ['human', "rgb_array"], 'name': "pistonball_v3"}

    def __init__(self, n_pistons=20, local_ratio=0, time_penalty=-0.1, continuous=True, random_drop=True, random_rotate=True, ball_mass=0.75, ball_friction=0.3, ball_elasticity=1.5, max_cycles=125):
        # EzPickle stores the constructor args so the env can be re-created.
        EzPickle.__init__(self, n_pistons, local_ratio, time_penalty, continuous, random_drop, random_rotate, ball_mass, ball_friction, ball_elasticity, max_cycles)
        self.n_pistons = n_pistons
        # Piston / wall / ball geometry, in pixels.
        self.piston_head_height = 11
        self.piston_width = 40
        self.piston_height = 40
        self.piston_body_height = 23
        self.piston_radius = 5
        self.wall_width = 40
        self.ball_radius = 40
        self.screen_width = (2 * self.wall_width) + (self.piston_width * self.n_pistons)
        self.screen_height = 560
        y_high = self.screen_height - self.wall_width - self.piston_body_height
        y_low = self.wall_width
        obs_height = y_high - y_low
        assert self.piston_width == self.wall_width, "Wall width and piston width must be equal for observation calculation"
        self.agents = ["piston_" + str(r) for r in range(self.n_pistons)]
        self.possible_agents = self.agents[:]
        self.agent_name_mapping = dict(zip(self.agents, list(range(self.n_pistons))))
        self._agent_selector = agent_selector(self.agents)
        # Each piston observes an RGB window three piston-widths wide.
        self.observation_spaces = dict(
            zip(self.agents, [gym.spaces.Box(low=0, high=255, shape=(obs_height, self.piston_width * 3, 3), dtype=np.uint8)] * self.n_pistons))
        self.continuous = continuous
        if self.continuous:
            self.action_spaces = dict(zip(self.agents, [gym.spaces.Box(low=-1, high=1, shape=(1,))] * self.n_pistons))
        else:
            # Discrete actions are shifted by -1 in step(): 0=down, 1=stay, 2=up.
            self.action_spaces = dict(zip(self.agents, [gym.spaces.Discrete(3)] * self.n_pistons))
        pygame.init()
        pymunk.pygame_util.positive_y_is_up = False
        self.renderOn = False
        self.screen = pygame.Surface((self.screen_width, self.screen_height))
        self.max_cycles = max_cycles
        self.piston_sprite = get_image('piston.png')
        self.piston_body_sprite = get_image('piston_body.png')
        self.background = get_image('background.png')
        self.random_drop = random_drop
        self.random_rotate = random_rotate
        self.pistonList = []
        self.pistonRewards = []  # Keeps track of individual rewards
        self.recentFrameLimit = 20  # Defines what "recent" means in terms of number of frames.
        self.recentPistons = set()  # Set of pistons that have touched the ball recently
        self.time_penalty = time_penalty
        self.local_ratio = local_ratio
        self.ball_mass = ball_mass
        self.ball_friction = ball_friction
        self.ball_elasticity = ball_elasticity
        self.done = False
        self.pixels_per_position = 4
        self.n_piston_positions = 16
        self.screen.fill((0, 0, 0))
        self.draw_background()
        # self.screen.blit(self.background, (0, 0))
        # Interior play area (excludes walls and piston bodies).
        self.render_rect = pygame.Rect(
            self.wall_width,   # Left
            self.wall_width,   # Top
            self.screen_width - (2 * self.wall_width),   # Width
            self.screen_height - (2 * self.wall_width) - self.piston_body_height   # Height
        )
        # Blit background image if ball goes out of bounds. Ball radius is 40
        self.valid_ball_position_rect = pygame.Rect(
            self.render_rect.left + self.ball_radius,   # Left
            self.render_rect.top + self.ball_radius,   # Top
            self.render_rect.width - (2 * self.ball_radius),   # Width
            self.render_rect.height - (2 * self.ball_radius)   # Height
        )
        self.frames = 0
        self.display_wait = 0.0
        self.has_reset = False
        self.closed = False
        self.seed()

    def seed(self, seed=None):
        # NOTE(review): gym's seeding convention returns [seed]; this
        # returns None -- confirm whether callers rely on the return value.
        self.np_random, seed = seeding.np_random(seed)

    def observe(self, agent):
        """Return the RGB window centered on `agent`'s piston (one piston
        of context on either side)."""
        observation = pygame.surfarray.pixels3d(self.screen)
        i = self.agent_name_mapping[agent]
        # Set x bounds to include 40px left and 40px right of piston
        x_high = self.wall_width + self.piston_width * (i + 2)
        x_low = self.wall_width + self.piston_width * (i - 1)
        y_high = self.screen_height - self.wall_width - self.piston_body_height
        y_low = self.wall_width
        cropped = np.array(observation[x_low:x_high, y_low:y_high, :])
        # Surface arrays are (width, height); rotate+flip to (height, width).
        observation = np.rot90(cropped, k=3)
        observation = np.fliplr(observation)
        return observation

    def enable_render(self):
        """Switch from an off-screen surface to a real display window."""
        self.screen = pygame.display.set_mode((self.screen_width, self.screen_height))
        self.renderOn = True
        # self.screen.blit(self.background, (0, 0))
        self.draw_background()
        self.draw()

    def close(self):
        """Tear down the display (if open); safe to call more than once."""
        if not self.closed:
            self.closed = True
            if self.renderOn:
                # Fall back to an off-screen surface so observe() keeps working.
                self.screen = pygame.Surface((self.screen_width, self.screen_height))
                self.renderOn = False
                pygame.event.pump()
                pygame.display.quit()

    def add_walls(self):
        """Add the four static boundary segments to the physics space."""
        top_left = (self.wall_width, self.wall_width)
        top_right = (self.screen_width - self.wall_width, self.wall_width)
        bot_left = (self.wall_width, self.screen_height - self.wall_width)
        bot_right = (self.screen_width - self.wall_width, self.screen_height - self.wall_width)
        walls = [
            pymunk.Segment(self.space.static_body, top_left, top_right, 1),   # Top wall
            pymunk.Segment(self.space.static_body, top_left, bot_left, 1),    # Left wall
            pymunk.Segment(self.space.static_body, bot_left, bot_right, 1),   # Bottom wall
            pymunk.Segment(self.space.static_body, top_right, bot_right, 1)   # Right
        ]
        for wall in walls:
            wall.friction = .64
            self.space.add(wall)

    def add_ball(self, x, y, b_mass, b_friction, b_elasticity):
        """Create the dynamic ball body at (x, y) and return it."""
        mass = b_mass
        radius = 40
        inertia = pymunk.moment_for_circle(mass, 0, radius, (0, 0))
        body = pymunk.Body(mass, inertia)
        body.position = x, y
        # radians per second
        if self.random_rotate:
            body.angular_velocity = self.np_random.uniform(-6 * math.pi, 6 * math.pi)
        shape = pymunk.Circle(body, radius, (0, 0))
        shape.friction = b_friction
        shape.elasticity = b_elasticity
        self.space.add(body, shape)
        return body

    def add_piston(self, space, x, y):
        """Create one kinematic piston (a horizontal segment) at (x, y)."""
        piston = pymunk.Body(body_type=pymunk.Body.KINEMATIC)
        piston.position = x, y
        segment = pymunk.Segment(piston, (0, 0), (self.piston_width - (2 * self.piston_radius), 0), self.piston_radius)
        segment.friction = .64
        segment.color = pygame.color.THECOLORS["blue"]
        space.add(segment)
        return piston

    def move_piston(self, piston, v):
        """Move `piston` vertically by `v` grid positions, clamped to its
        allowed travel range (upward motion is negative y)."""
        def cap(y):
            maximum_piston_y = self.screen_height - self.wall_width - (self.piston_height - self.piston_head_height)
            if y > maximum_piston_y:
                y = maximum_piston_y
            elif y < maximum_piston_y - (self.n_piston_positions * self.pixels_per_position):
                y = maximum_piston_y - (self.n_piston_positions * self.pixels_per_position)
            return y
        piston.position = (piston.position[0], cap(piston.position[1] - v * self.pixels_per_position))

    def reset(self):
        """Rebuild the physics space, pistons and ball, and reset all
        per-episode PettingZoo bookkeeping."""
        self.space = pymunk.Space(threaded=False)
        self.add_walls()
        # self.space.threads = 2
        self.space.gravity = (0.0, 750.0)
        self.space.collision_bias = .0001
        self.space.iterations = 10  # 10 is default in PyMunk
        self.pistonList = []
        maximum_piston_y = self.screen_height - self.wall_width - (self.piston_height - self.piston_head_height)
        for i in range(self.n_pistons):
            # Multiply by 0.5 to use only the lower half of possible positions
            possible_y_displacements = np.arange(0, .5 * self.pixels_per_position * self.n_piston_positions, self.pixels_per_position)
            piston = self.add_piston(
                self.space,
                self.wall_width + self.piston_radius + self.piston_width * i,   # x position
                maximum_piston_y - self.np_random.choice(possible_y_displacements)   # y position
            )
            # NOTE(review): "velociy" is a typo -- this sets a junk attribute
            # rather than the pymunk body's velocity.  Kept unchanged here;
            # kinematic bodies default to zero velocity anyway.
            piston.velociy = 0
            self.pistonList.append(piston)
        self.horizontal_offset = 0
        self.vertical_offset = 0
        horizontal_offset_range = 30
        vertical_offset_range = 15
        if self.random_drop:
            self.vertical_offset = self.np_random.randint(-vertical_offset_range, vertical_offset_range + 1)
            self.horizontal_offset = self.np_random.randint(-horizontal_offset_range, horizontal_offset_range + 1)
        ball_x = (self.screen_width
                  - self.wall_width
                  - self.ball_radius
                  - horizontal_offset_range
                  + self.horizontal_offset)
        ball_y = (self.screen_height
                  - self.wall_width
                  - self.piston_body_height
                  - self.ball_radius
                  - (0.5 * self.pixels_per_position * self.n_piston_positions)
                  - vertical_offset_range
                  + self.vertical_offset)
        self.ball = self.add_ball(ball_x, ball_y, self.ball_mass, self.ball_friction, self.ball_elasticity)
        self.ball.angle = 0
        self.ball.velocity = (0, 0)
        if self.random_rotate:
            self.ball.angular_velocity = self.np_random.uniform(-6 * math.pi, 6 * math.pi)
        # Track the ball's left edge for reward deltas.
        self.lastX = int(self.ball.position[0] - self.ball_radius)
        self.distance = self.lastX - self.wall_width
        self.draw_background()
        self.draw()
        self.agents = self.possible_agents[:]
        self._agent_selector.reinit(self.agents)
        self.agent_selection = self._agent_selector.next()
        self.has_reset = True
        self.done = False
        self.rewards = dict(zip(self.agents, [0 for _ in self.agents]))
        self._cumulative_rewards = dict(zip(self.agents, [0 for _ in self.agents]))
        self.dones = dict(zip(self.agents, [False for _ in self.agents]))
        self.infos = dict(zip(self.agents, [{} for _ in self.agents]))
        self.frames = 0

    def draw_background(self):
        """Paint the outer/inner wall rectangles and the piston bodies."""
        outer_walls = pygame.Rect(
            0,   # Left
            0,   # Top
            self.screen_width,   # Width
            self.screen_height,   # Height
        )
        outer_wall_color = (58, 64, 65)
        pygame.draw.rect(self.screen, outer_wall_color, outer_walls)
        inner_walls = pygame.Rect(
            self.wall_width / 2,   # Left
            self.wall_width / 2,   # Top
            self.screen_width - self.wall_width,   # Width
            self.screen_height - self.wall_width,   # Height
        )
        inner_wall_color = (68, 76, 77)
        pygame.draw.rect(self.screen, inner_wall_color, inner_walls)
        self.draw_pistons()

    def draw_pistons(self):
        """Draw each piston's base sprite plus a blue shaft whose height
        reflects the piston's current extension."""
        piston_color = (65, 159, 221)
        x_pos = self.wall_width
        for piston in self.pistonList:
            self.screen.blit(self.piston_body_sprite, (x_pos, self.screen_height - self.wall_width - self.piston_body_height))
            # Height is the size of the blue part of the piston. 6 is the piston base height (the gray part at the bottom)
            height = self.screen_height - self.wall_width - self.piston_body_height - (piston.position[1] + self.piston_radius) + (self.piston_body_height - 6)
            body_rect = pygame.Rect(
                piston.position[0] + self.piston_radius + 1,   # +1 to match up to piston graphics
                piston.position[1] + self.piston_radius + 1,
                18,
                height
            )
            pygame.draw.rect(self.screen, piston_color, body_rect)
            x_pos += self.piston_width

    def draw(self):
        """Draw the play area, ball (with a rotation-indicator line) and pistons."""
        # redraw the background image if ball goes outside valid position
        if not self.valid_ball_position_rect.collidepoint(self.ball.position):
            # self.screen.blit(self.background, (0, 0))
            self.draw_background()
        ball_x = int(self.ball.position[0])
        ball_y = int(self.ball.position[1])
        color = (255, 255, 255)
        pygame.draw.rect(self.screen, color, self.render_rect)
        color = (65, 159, 221)
        pygame.draw.circle(self.screen, color, (ball_x, ball_y), self.ball_radius)
        # Line from center to rim visualizes the ball's spin.
        line_end_x = ball_x + (self.ball_radius - 1) * np.cos(self.ball.angle)
        line_end_y = ball_y + (self.ball_radius - 1) * np.sin(self.ball.angle)
        color = (58, 64, 65)
        pygame.draw.line(self.screen, color, (ball_x, ball_y), (line_end_x, line_end_y), 3)   # 39 because it kept sticking over by 1 at 40
        for piston in self.pistonList:
            self.screen.blit(self.piston_sprite, (piston.position[0] - self.piston_radius, piston.position[1] - self.piston_radius))
        self.draw_pistons()

    def get_nearby_pistons(self):
        """Return indices of the piston closest to the ball plus its
        immediate neighbors (used for the local reward share)."""
        # first piston = leftmost
        nearby_pistons = []
        ball_pos = int(self.ball.position[0] - self.ball_radius)
        closest = abs(self.pistonList[0].position.x - ball_pos)
        closest_piston_index = 0
        for i in range(self.n_pistons):
            next_distance = abs(self.pistonList[i].position.x - ball_pos)
            if next_distance < closest:
                closest = next_distance
                closest_piston_index = i
        if closest_piston_index > 0:
            nearby_pistons.append(closest_piston_index - 1)
        nearby_pistons.append(closest_piston_index)
        if closest_piston_index < self.n_pistons - 1:
            nearby_pistons.append(closest_piston_index + 1)
        return nearby_pistons

    def get_local_reward(self, prev_position, curr_position):
        # Positive when the ball moved left (toward the goal wall).
        local_reward = .5 * (prev_position - curr_position)
        return local_reward

    def render(self, mode="human"):
        """Render to the display; returns an (H, W, 3) array for rgb_array mode."""
        if not self.renderOn:
            # sets self.renderOn to true and initializes display
            self.enable_render()
        observation = np.array(pygame.surfarray.pixels3d(self.screen))
        pygame.display.flip()
        # Surface arrays are (width, height, 3); transpose to (height, width, 3).
        return np.transpose(observation, axes=(1, 0, 2)) if mode == "rgb_array" else None

    def step(self, action):
        """Apply one agent's action; after the last agent acts, step the
        physics, compute global + local rewards, and advance bookkeeping."""
        if self.dones[self.agent_selection]:
            return self._was_done_step(action)
        action = np.asarray(action)
        agent = self.agent_selection
        if self.continuous:
            self.move_piston(self.pistonList[self.agent_name_mapping[agent]], action)
        else:
            # Map Discrete {0,1,2} to {-1,0,1}.
            self.move_piston(self.pistonList[self.agent_name_mapping[agent]], action - 1)
        self.space.step(1 / 20.0)
        if self._agent_selector.is_last():
            ball_min_x = int(self.ball.position[0] - self.ball_radius)
            # Episode ends once the ball's left edge reaches the left wall.
            if ball_min_x <= self.wall_width + 1:
                self.done = True
            self.draw()
            local_reward = self.get_local_reward(self.lastX, ball_min_x)
            # Opposite order due to moving right to left
            global_reward = (100 / self.distance) * (self.lastX - ball_min_x)
            if not self.done:
                global_reward += self.time_penalty
            total_reward = [global_reward * (1 - self.local_ratio)] * self.n_pistons   # start with global reward
            local_pistons_to_reward = self.get_nearby_pistons()
            for index in local_pistons_to_reward:
                total_reward[index] += local_reward * self.local_ratio
            self.rewards = dict(zip(self.agents, total_reward))
            self.lastX = ball_min_x
            self.frames += 1
        else:
            self._clear_rewards()
        if self.frames >= self.max_cycles:
            self.done = True
        # Clear the list of recent pistons for the next reward cycle
        if self.frames % self.recentFrameLimit == 0:
            self.recentPistons = set()
        if self._agent_selector.is_last():
            self.dones = dict(zip(self.agents, [self.done for _ in self.agents]))
        self.agent_selection = self._agent_selector.next()
        self._cumulative_rewards[agent] = 0
        self._accumulate_rewards()
# Game art created by <NAME>
| StarcoderdataPython |
1824961 | import json
import gzip
import sys
import os
import socket
from io import BytesIO
from .utils import timestamp, base64_encode
from .runtime import runtime_info
if runtime_info.PYTHON_2:
from urllib2 import urlopen
from urllib2 import Request
from urllib import urlencode
else:
from urllib.request import urlopen
from urllib.request import Request
from urllib.parse import urlencode
class APIRequest:
    """Posts gzip-compressed JSON payloads to the agent's dashboard API."""

    def __init__(self, agent):
        self.agent = agent

    def post(self, endpoint, payload):
        """POST `payload` to <dashboard_address>/agent/v1/<endpoint> and
        return the decoded JSON response.

        The request body is gzip-compressed and authenticated with Basic
        auth derived from the agent key.  Raises on network/HTTP errors.
        """
        # Basic auth: base64("agent_key:"), stripping the newline base64 may add.
        agent_key_64 = base64_encode(self.agent.get_option('agent_key') + ':').replace('\n', '')
        headers = {
            'Accept-Encoding': 'gzip',
            'Authorization': "Basic %s" % agent_key_64,
            'Content-Type': 'application/json',
            'Content-Encoding': 'gzip'
        }
        host_name = 'undefined'
        try:
            host_name = socket.gethostname()
        except Exception:
            self.agent.exception()
        # NOTE(review): the guarded host_name above is never used -- the body
        # below calls socket.gethostname() again (unguarded) as the
        # get_option default.  Confirm which was intended.
        req_body = {
            'runtime_type': 'python',
            'runtime_version': '{0.major}.{0.minor}.{0.micro}'.format(sys.version_info),
            'agent_version': self.agent.AGENT_VERSION,
            'app_name': self.agent.get_option('app_name'),
            'app_version': self.agent.get_option('app_version'),
            'app_environment': self.agent.get_option('app_environment'),
            'host_name': self.agent.get_option('host_name', socket.gethostname()),
            'process_id': os.getpid(),
            'run_id': self.agent.run_id,
            'run_ts': self.agent.run_ts,
            'sent_at': timestamp(),
            'payload': payload,
        }
        # Gzip the JSON request body in memory.
        gzip_out = BytesIO()
        with gzip.GzipFile(fileobj=gzip_out, mode="w") as f:
            f.write(json.dumps(req_body).encode('utf-8'))
            f.close()  # redundant inside `with`, kept as-is
        gzip_out_val = gzip_out.getvalue()
        # On Python 2, getvalue() yields str; wrap in bytearray for urlopen.
        if isinstance(gzip_out_val, str):
            req_body_gzip = bytearray(gzip_out.getvalue())
        else:
            req_body_gzip = gzip_out.getvalue()
        request = Request(
            url = self.agent.get_option('dashboard_address') + '/agent/v1/' + endpoint,
            data = req_body_gzip,
            headers = headers)
        response = urlopen(request, timeout = 20)
        result_data = response.read()
        # Transparently decompress a gzip-encoded response body.
        if response.info():
            content_type = response.info().get('Content-Encoding')
            if content_type == 'gzip':
                result_data = gzip.GzipFile('', 'r', 0, BytesIO(result_data)).read()
        response.close()
        return json.loads(result_data.decode('utf-8'))
def python_version():
    """Return the interpreter version as a ``'major.minor.micro'`` string.

    BUG FIX: the original built a malformed list (it summed ``minor + micro``
    as integers and included an empty middle element) and then discarded it,
    implicitly returning None.
    """
    return '{0.major}.{0.minor}.{0.micro}'.format(sys.version_info)
| StarcoderdataPython |
8091298 | <reponame>styam/coading_practice
"""
Write a Python program to create a lambda function that adds 15 to a given number passed in as an argument,
also create a lambda function that multiplies argument x with argument y and print the result.
"""
# Lambda that adds 15 to its argument.
add_number = lambda value: value + 15
# Lambda that multiplies its two arguments.
multiplies = lambda first, second: first * second

print(add_number(5))
print(multiplies(20, 5))
83713 | """
Will run "shellfoundry install" on all shell subdirectories of current folder
"""
import os
import subprocess
def install_shell(dir_name):
    """Run ``shellfoundry install`` inside the subdirectory *dir_name*.

    The directory is resolved relative to this script's own location, and a
    separator line is printed after the install command returns.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    target_dir = os.path.join(script_dir, dir_name)
    subprocess.call(["shellfoundry", "install"], cwd=target_dir)
    print("===========================")
# Every immediate subdirectory of the current folder is treated as a shell
# package to install.
directories_in_curdir = [d for d in os.listdir(os.curdir) if os.path.isdir(d)]
if not directories_in_curdir:
    raise Exception("No subdirectories in this folder")
for shell_dir in directories_in_curdir:
    install_shell(shell_dir)
| StarcoderdataPython |
12827303 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Licensed under a MIT style license - see LICENSE.rst
""" Access spectroscopic data for a single BOSS target.
"""
from __future__ import division, print_function
from six import binary_type
import re
import numpy as np
import numpy.ma
import fitsio
import astropy.table
import bossdata.raw
# Parameters of the fiducial SDSS co-add wavelength grid:
# wavelength = 3500.26 * 10**(1e-4 * index).
_fiducial_coef = 1e-4
_fiducial_log10lam0 = np.log10(3500.26)


def get_fiducial_pixel_index(wavelength):
    """Convert a wavelength to a fiducial pixel index.

    All SDSS co-added spectra share a logarithmically spaced grid,
    ``wavelength = wavelength0 * 10**(coef * index)`` with ``coef = 1e-4``
    (FITS keyword ``CD1_1``/``COEFF1``). Because different co-adds trim
    different amounts of invalid data, this function fixes ``index = 0`` at
    the constant wavelength 3500.26 Angstrom:

    >>> get_fiducial_pixel_index(3500.26)
    0.0

    The result is a float, so off-grid wavelengths can be detected:

    >>> get_fiducial_pixel_index(3500.5)
    0.29776960129179741

    The calculation broadcasts over array input. See also
    :attr:`fiducial_pixel_index_range` and :attr:`fiducial_loglam`.

    Args:
        wavelength(float): Input wavelength in Angstroms.

    Returns:
        numpy.ndarray: Floating-point indices relative to the fiducial grid.
    """
    log10_wavelength = np.log10(wavelength)
    return (log10_wavelength - _fiducial_log10lam0) / _fiducial_coef
fiducial_pixel_index_range = (0, 4800)
"""
Range of fiducial pixel indices that covers all spectra.
Use :func:`get_fiducial_pixel_index` to calculate fiducial pixel indices.
"""
fiducial_loglam = (_fiducial_log10lam0 +
_fiducial_coef * np.arange(*fiducial_pixel_index_range))
"""
Array of fiducial log10(wavelength in Angstroms) covering all spectra.
Lookup the log10(wavelength) or wavelength corresponding to a particular
integral pixel index using:
>>> fiducial_loglam[100]
3.554100305027835
>>> 10**fiducial_loglam[100]
3581.7915291606305
The bounding wavelengths of this range are:
>>> 10**fiducial_loglam[[0,-1]]
array([ 3500.26 , 10568.18251472])
The :meth:`SpecFile.get_valid_data` and :meth:`PlateFile.get_valid_data()
<bossdata.plate.PlateFile.get_valid_data>` methods provide a ``fiducial_grid``
option that returns data using this grid.
"""
class Exposures(object):
    """Table of exposure info extracted from FITS header keywords.

    Parse the NEXP and EXPIDnn keywords that are present in the header of HDU0
    in :datamodel:`spPlate <PLATE4/spPlate>` and :datamodel:`spec
    <spectra/PLATE4/spec>` FITS files.

    The constructor initializes the ``table`` attribute with column names
    ``offset``, ``camera``, ``science``, ``flat`` and ``arc``, and creates one
    row for each keyword EXPIDnn, where ``offset`` equals the keyword sequence
    number nn, ``camera`` is one of b1, b2, r1, r2, and the remaining columns
    record the science and calibration exposure numbers.

    Use :meth:`get_info` to retrieve the n-th exposure for a particular camera
    (b1, b2, r1, r2). Note that when this class is initialized from a
    :datamodel:`spec file <spectra/PLATE4/spec>` header, it will only describe
    the two cameras of a single spectrograph (b1+r1 or b2+r2). The `num_by_camera`
    attribute is a dictionary of ints indexed by camera that records the number
    of science exposures available for that camera.

    Args:
        header(dict): dictionary of FITS header keyword, value pairs.
    """
    def __init__(self, header):
        num_exposures = header['NEXP']
        # EXPIDnn values look like "b1-00123456-00123457-00123458":
        # camera, science, flat, arc exposure numbers.
        expid_pattern = re.compile('([br][12])-([0-9]{8})-([0-9]{8})-([0-9]{8})')
        exposure_set = set()
        self.table = astropy.table.Table(
            names=('offset', 'camera', 'science', 'flat', 'arc'),
            dtype=('i4', 'S2', 'i4', 'i4', 'i4'))
        self.num_by_camera = dict(b1=0, b2=0, r1=0, r2=0)
        for i in range(num_exposures):
            camera, science_num, flat_num, arc_num = expid_pattern.match(
                header['EXPID{0:02d}'.format(i + 1)]).groups()
            self.table.add_row((i, camera, int(science_num), int(flat_num), int(arc_num)))
            exposure_set.add(int(science_num))
            self.num_by_camera[camera] += 1
        self.sequence = sorted(exposure_set)
        # Check that the science exposures listed for each camera are self consistent.
        num_exposures = len(self.sequence)
        for camera in ('b1', 'b2', 'r1', 'r2'):
            if self.num_by_camera[camera] == 0:
                continue
            if self.num_by_camera[camera] != num_exposures:
                raise RuntimeError('Found {} {} exposures but expected {}.'.format(
                    self.num_by_camera[camera], camera, num_exposures))
            # Conversion to binary_type is needed for backwards compatibility with
            # astropy < 2.0 and python 3. For details, see:
            # http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-python-3
            camera_rows = self.table['camera'] == binary_type(camera, 'ascii')
            camera_exposures = set(self.table[camera_rows]['science'])
            if camera_exposures != exposure_set:
                raise RuntimeError('Found inconsistent {} exposures: {}. Expected: {}.'.format(
                    camera, camera_exposures, exposure_set))

    def get_info(self, exposure_index, camera):
        """Get information about a single camera exposure.

        Args:
            exposure_index(int): The sequence number for the requested camera
                exposure, in the range 0 - `(num_exposures[camera]-1)`.
            camera(str): One of b1,b2,r1,r2.

        Returns:
            A structured array with information about the requested exposure,
            corresponding to one row of our ``table`` attribute.

        Raises:
            ValueError: Invalid exposure_index or camera.
            RuntimeError: Exposure not present.
        """
        if camera not in ('b1', 'b2', 'r1', 'r2'):
            raise ValueError(
                'Invalid camera "{}", expected b1, b2, r1, or r2.'.format(camera))
        if self.num_by_camera[camera] == 0:
            raise ValueError('There are no {} exposures available.'.format(camera))
        if exposure_index < 0 or exposure_index >= self.num_by_camera[camera]:
            raise ValueError('Invalid exposure_index {}, expected 0-{}.'.format(
                exposure_index, self.num_by_camera[camera] - 1))
        science_num = self.sequence[exposure_index]
        row = (self.table['science'] == science_num) & (
            self.table['camera'] == binary_type(camera, 'ascii'))
        if not np.any(row):
            # This should never happen after our self-consistency checks in the ctor.
            raise RuntimeError('No exposure[{}] = {:08d} found for {}.'.format(
                exposure_index, science_num, camera))
        if np.count_nonzero(row) > 1:
            # This should never happen after our self-consistency checks in the ctor.
            raise RuntimeError(
                'Found multiple {} exposures[{}].'.format(camera, exposure_index))
        return self.table[row][0]

    def get_exposure_name(self, exposure_index, camera, ftype='spCFrame'):
        """Get the file name of a single science or calibration exposure data product.

        Use the exposure name to locate FITS data files associated with
        individual exposures. The supported file types are:
        :datamodel:`spCFrame <PLATE4/spCFrame>`,
        :datamodel:`spFrame <PLATE4/spFrame>`,
        :datamodel:`spFluxcalib <PLATE4/spFluxcalib>`
        :datamodel:`spFluxcorr <PLATE4/spFluxcorr>`,
        :datamodel:`spArc <PLATE4/spArc>`,
        :datamodel:`spFlat <PLATE4/spFlat>`. This method is analogous to
        :meth:`bossdata.plate.Plan.get_exposure_name`, but operates for a single
        target and only knows about exposures actually used in the final co-add
        (including the associated arc and flat exposures).

        Args:
            exposure_index(int): The sequence number for the requested camera
                exposure, in the range 0 - `(num_exposures[camera]-1)`.
            camera(str): One of b1,b2,r1,r2.
            ftype(str): Type of exposure file whose name to return. Must be one of
                spCFrame, spFrame, spFluxcalib, spFluxcorr, spArc, spFlat. An spCFrame
                is assumed to be uncompressed, and all other files are assumed to be
                compressed. When a calibration is requested (spArc, spFlat) results from
                the calibration exposure used to analyze the specified science exposure
                is returned.

        Returns:
            str: Exposure name of the form [ftype]-[cc]-[eeeeeeee].[ext] where [cc]
                identifies the camera (one of b1,r1,b2,r2) and [eeeeeeee] is the
                zero-padded arc/flat/science exposure number. The extension [ext]
                is "fits" for spCFrame files and "fits.gz" for all other file types.

        Raises:
            ValueError: one of the inputs is invalid.
        """
        if camera not in ('b1', 'b2', 'r1', 'r2'):
            raise ValueError(
                'Invalid camera "{}", expected b1, b2, r1, or r2.'.format(camera))
        if exposure_index < 0 or exposure_index >= self.num_by_camera[camera]:
            raise ValueError('Invalid exposure_index {}, expected 0-{}.'.format(
                exposure_index, self.num_by_camera[camera] - 1))
        ftypes = ('spCFrame', 'spFrame', 'spFluxcalib', 'spFluxcorr', 'spArc', 'spFlat')
        if ftype not in ftypes:
            raise ValueError('Invalid file type ({}) must be one of: {}.'
                             .format(ftype, ', '.join(ftypes)))
        # Get the science exposure ID number for the requested sequence number 0,1,...
        exposure_info = self.get_info(exposure_index, camera)
        if ftype == 'spArc':
            exposure_id = exposure_info['arc']
        elif ftype == 'spFlat':
            exposure_id = exposure_info['flat']
        else:
            exposure_id = exposure_info['science']
        name = '{0}-{1}-{2:08d}.fits'.format(ftype, camera, exposure_id)
        if ftype != 'spCFrame':
            name += '.gz'
        return name

    def get_raw_image(self, plate, mjd, exposure_index, camera, flavor='science',
                      finder=None, mirror=None):
        """Get the raw image file associated with an exposure.

        Args:
            plate(int): Plate number, which must be positive.
            mjd(int): Modified Julian date of the observation, which must be > 45000.
            exposure_index(int): The sequence number for the requested camera
                exposure, in the range 0 - `(num_exposures[camera]-1)`.
            camera(str): One of b1,b2,r1,r2.
            flavor(str): One of science, arc, flat.
            finder(bossdata.path.Finder): Object used to find the names of BOSS data files.
                If not specified, the default Finder constructor is used.
            mirror(bossdata.remote.Manager): Object used to interact with the local mirror
                of BOSS data. If not specified, the default Manager constructor is used.

        Returns:
            bossdata.raw.RawImageFile: requested raw image file.

        Raises:
            ValueError: one of the inputs is invalid.
        """
        if plate < 0:
            raise ValueError('Invalid plate number ({}) must be > 0.'.format(plate))
        if mjd <= 45000:
            raise ValueError('Invalid mjd ({}) must be >= 45000.'.format(mjd))
        if camera not in ('b1', 'b2', 'r1', 'r2'):
            raise ValueError(
                'Invalid camera "{}". Expected one of b1, b2, r1, r2.'.format(camera))
        if exposure_index < 0 or exposure_index >= self.num_by_camera[camera]:
            raise ValueError('Invalid exposure_index {}, expected 0-{}.'.format(
                exposure_index, self.num_by_camera[camera] - 1))
        if flavor not in ('science', 'arc', 'flat'):
            # BUG FIX: the original message contained a bare "{}" placeholder
            # that was never formatted with the offending flavor value.
            raise ValueError(
                'Invalid flavor "{}". Expected one of science, arc, flat.'.format(flavor))
        exposure_info = self.get_info(exposure_index, camera)
        exposure_id = exposure_info[flavor]
        # Load the co-add plan to determine the observation MJD for this exposure,
        # which is generally different (earlier) than the MJD assigned to the coadd.
        # There are other ways to do this, but this requires the smallest download.
        # NOTE(review): this relies on bossdata.path / bossdata.plate /
        # bossdata.remote being importable as attributes of the bossdata
        # package; only ``import bossdata.raw`` appears at module scope here —
        # confirm the package __init__ exposes these submodules.
        if finder is None:
            finder = bossdata.path.Finder()
        plan_path = finder.get_plate_plan_path(plate, mjd, combined=True)
        if mirror is None:
            mirror = bossdata.remote.Manager()
        plan = bossdata.plate.Plan(mirror.get(plan_path))
        # Find the observation MJD of the requested science exposure.
        found = plan.exposure_table['exp'] == exposure_info['science']
        if np.count_nonzero(found) != 1:
            raise RuntimeError('Cannot locate science exposure in plan.')
        obs_mjd = plan.exposure_table[found][0]['mjd']
        path = mirror.get(finder.get_raw_path(obs_mjd, camera, exposure_id))
        return bossdata.raw.RawImageFile(path)
class SpecFile(object):
    """ A BOSS spec file containing summary data for a single target.

    A :datamodel:`spec file <spec>` contains co-added spectra for a single target of an
    observation. This class supports the full version described in the data model as
    well as a :datamodel:`lite version <spectra/lite/PLATE4/spec>` that does not contain
    the per-exposure HDUs with indices >= 4. Use the `lite` attribute to detect which
    version an object represents.

    To read all co-added spectra of an observation use :class:`bossdata.plate.PlateFile`.
    Individual exposures of a half-plate can be read using :class:`bossdata.plate.FrameFile`.

    The ``plate``, ``mjd`` and ``fiber`` attributes specify the target observation.
    The ``info`` attribute contains this target's row from :datamodel:`spAll <spAll>`
    as a structured numpy array, so its metadata can be accessed as ``info['OBJTYPE']``,
    etc.

    Use :meth:`get_valid_data` to access this target's spectra, or the :class:`exposures
    <Exposures>` attribute for a list of exposures used in the coadd (see
    :class:`bossdata.plate.Plan` for alternative information about the exposures used in
    a coadd.) The ``num_exposures`` attribute gives the number of science exposures used
    for this target's co-added spectrum (counting a blue+red pair as one exposure). Use
    :meth:`get_exposure_name` to locate files associated the individual exposures used
    for this co-added spectrum.

    This class is only intended for reading the BOSS spec file format, so generic
    operations on spectroscopic data (redshifting, resampling, etc) are intentionally not
    included here, but are instead provided in the `speclite
    <http://speclite.readthedocs.org>`__ package.

    Args:
        path(str): Local path of the spec FITS file to use. This should normally be obtained
            via :meth:`bossdata.path.Finder.get_spec_path` and can be automatically mirrored
            via :meth:`bossdata.remote.Manager.get` or using the :ref:`bossfetch` script. The
            file is opened in read-only mode so you do not need write privileges.
    """
    def __init__(self, path):
        # Open read-only; the FITS handle stays open for the object's lifetime.
        self.hdulist = fitsio.FITS(path, mode=fitsio.READONLY)
        # "lite" spec files contain exactly four HDUs (no per-exposure HDUs).
        self.lite = (len(self.hdulist) == 4)
        self.header = self.hdulist[0].read_header()
        # Look up the available exposures.
        self.exposures = Exposures(self.header)
        self.num_exposures = len(self.exposures.sequence)
        # Extract our plate-mjd-fiber values.
        self.plate, self.mjd, self.fiber = (
            self.hdulist[2]['PLATE', 'MJD', 'FIBERID'][0][0])
        # We don't use bossdata.plate.get_num_fibers here to avoid a circular import.
        num_fibers = 640 if self.plate < 3510 else 1000
        # Determine which spectrograph (1 or 2) observed this fiber; fibers in
        # the first half of the plate belong to spectrograph 1.
        self.spec_id = '1' if self.fiber <= num_fibers // 2 else '2'

    def get_exposure_name(self, sequence_number, band, ftype='spCFrame'):
        """Get the file name of a single science exposure data product.

        Use the exposure name to locate FITS data files associated with
        individual exposures. The supported file types are:
        :datamodel:`spCFrame <PLATE4/spCFrame>`,
        :datamodel:`spFrame <PLATE4/spFrame>`,
        :datamodel:`spFluxcalib <PLATE4/spFluxcalib>` and
        :datamodel:`spFluxcorr <PLATE4/spFluxcorr>`. This method is analogous to
        :meth:`bossdata.plate.Plan.get_exposure_name`, but operates for a single
        target and only knows about exposures actually used in the final co-add.

        Args:
            sequence_number(int): Science exposure sequence number, counting from zero.
                Must be less than our num_exposures attribute.
            band(str): Must be 'blue' or 'red'.
            ftype(str): Type of exposure file whose name to return. Must be one of
                spCFrame, spFrame, spFluxcalib, spFluxcorr. An spCFrame is assumed
                to be uncompressed, and all other files are assumed to be compressed.

        Returns:
            str: Exposure name of the form [ftype]-[cc]-[eeeeeeee].[ext] where [cc]
                identifies the camera (one of b1,r1,b2,r2) and [eeeeeeee] is the
                zero-padded exposure number. The extension [ext] is "fits" for
                spCFrame files and "fits.gz" for all other file types.

        Raises:
            ValueError: one of the inputs is invalid.
        """
        if band not in ('blue', 'red'):
            raise ValueError('Invalid band "{}". Expected blue or red.'.format(band))
        # Map band + spectrograph id to a camera name, e.g. 'blue' + '1' -> 'b1'.
        camera = band[0] + self.spec_id
        return self.exposures.get_exposure_name(sequence_number, camera, ftype)

    def get_raw_image(self, sequence_number, band, flavor='science',
                      finder=None, mirror=None):
        """Get a raw image file associated with one of this coadd's exposures.

        Args:
            sequence_number(int): The sequence number for the requested camera
                exposure, in the range 0 - `(num_exposures[camera]-1)`.
            band(str): Must be 'blue' or 'red'.
            flavor(str): One of science, arc, flat.
            finder(bossdata.path.Finder): Object used to find the names of BOSS data files.
                If not specified, the default Finder constructor is used.
            mirror(bossdata.remote.Manager): Object used to interact with the local mirror
                of BOSS data. If not specified, the default Manager constructor is used.

        Returns:
            bossdata.raw.RawImageFile: requested raw image file.

        Raises:
            ValueError: one of the inputs is invalid.
        """
        if band not in ('blue', 'red'):
            raise ValueError('Invalid band "{}". Expected blue or red.'.format(band))
        camera = band[0] + self.spec_id
        return self.exposures.get_raw_image(self.plate, self.mjd, sequence_number, camera,
                                            flavor, finder, mirror)

    def get_exposure_hdu(self, exposure_index, camera):
        """Lookup the HDU for one exposure.

        This method will not work on "lite" files, which do not include individual
        exposures.

        Args:
            exposure_index(int): Individual exposure to use, specified as a sequence number
                starting from zero, for the first exposure, and increasing up to
                `self.num_exposures-1`.
            camera(str): Which camera to use. Must be one of b1,b2,r1,r2.

        Returns:
            hdu: The HDU containing data for the requested exposure.

        Raises:
            RuntimeError: individual exposures not available in lite file.
        """
        if self.lite:
            raise RuntimeError('individual exposures not available in lite file.')
        info = self.exposures.get_info(exposure_index, camera)
        # Per-exposure HDUs start at index 4, in header EXPIDnn order.
        return self.hdulist[4 + info['offset']]

    def get_pixel_mask(self, exposure_index=None, camera=None):
        """Get the pixel mask for a specified exposure or the combined coadd.

        Returns the `and_mask` for coadded spectra. The entire mask is returned, including
        any pixels with zero inverse variance.

        Args:
            exposure_index(int): Individual exposure to use, specified as a sequence number
                starting from zero, for the first exposure, and increasing up to
                `self.num_exposures-1`. Uses the co-added spectrum when the value is None.
            camera(str): Which camera to use. Must be either 'b1', 'b2' (blue) or 'r1', 'r2'
                (red) unless exposure_index is None, in which case this argument is ignored.

        Returns:
            numpy.ndarray: Array of integers, one per pixel, encoding the mask bits defined
                in :attr:`bossdata.bits.SPPIXMASK` (see also
                http://www.sdss3.org/dr10/algorithms/bitmask_sppixmask.php).
        """
        if exposure_index is None:
            # Co-add: the AND of the per-exposure masks.
            hdu = self.hdulist[1]
            return hdu['and_mask'][:]
        else:
            hdu = self.get_exposure_hdu(exposure_index, camera)
            return hdu['mask'][:]

    def get_valid_data(self, exposure_index=None, camera=None, pixel_quality_mask=None,
                       include_wdisp=False, include_sky=False, use_ivar=False,
                       use_loglam=False, fiducial_grid=False):
        """Get the valid data for a specified exposure or the combined coadd.

        You will probably find yourself using this idiom often::

            data = spec.get_valid_data(...)
            wlen,flux,dflux = data['wavelength'][:],data['flux'][:],data['dflux'][:]

        Args:
            exposure_index(int): Individual exposure to use, specified as a sequence number
                starting from zero, for the first exposure, and increasing up to
                `self.num_exposures-1`. Uses the co-added spectrum when the value is None.
            camera(str): Which camera to use. Must be either 'b1', 'b2' (blue) or 'r1', 'r2'
                (red) unless exposure_index is None, in which case this argument is ignored.
            pixel_quality_mask(int): An integer value interpreted as a bit pattern using the
                bits defined in :attr:`bossdata.bits.SPPIXMASK` (see also
                http://www.sdss3.org/dr10/algorithms/bitmask_sppixmask.php). Any bits set in
                this mask are considered harmless and the corresponding spectrum pixels are
                assumed to contain valid data. When accessing the coadded spectrum, this mask
                is applied to the AND of the masks for each individual exposure. No mask is
                applied if this value is None.
            include_wdisp: Include a wavelength dispersion column in the returned data.
            include_sky: Include a sky flux column in the returned data.
            use_ivar: Replace ``dflux`` with ``ivar`` (inverse variance) in the returned
                data.
            use_loglam: Replace ``wavelength`` with ``loglam`` (``log10(wavelength)``) in
                the returned data.
            fiducial_grid: Return co-added data using the :attr:`fiducial wavelength grid
                <fiducial_loglam>`. If False, the returned array uses
                the native grid of the SpecFile, which generally trims pixels on both ends
                that have zero inverse variance. Set this value True to ensure that all
                co-added spectra use aligned wavelength grids when this matters.

        Returns:
            numpy.ma.MaskedArray: Masked array of per-pixel records. Pixels with no valid data
                are included but masked. The record for each pixel has at least the following
                named fields: wavelength in Angstroms (or loglam), flux and dflux in 1e-17
                ergs/s/cm2/Angstrom (or flux and ivar). Wavelength values are strictly
                increasing and dflux is calculated as ivar**-0.5 for pixels with valid data.
                Optional fields are wdisp in constant-log10-lambda pixels and sky in 1e-17
                ergs/s/cm2/Angstrom. The wavelength (or loglam) field is never masked and
                all other fields are masked when ivar is zero or a pipeline flag is set (and
                not allowed by ``pixel_quality_mask``).

        Raises:
            ValueError: fiducial grid is not supported for individual exposures.
            RuntimeError: co-added wavelength grid is not aligned with the fiducial grid.
        """
        # Look up the HDU for this spectrum and its pixel quality bitmap.
        if exposure_index is None:
            hdu = self.hdulist[1]
            pixel_bits = hdu['and_mask'][:]
        else:
            hdu = self.get_exposure_hdu(exposure_index, camera)
            pixel_bits = hdu['mask'][:]
        if fiducial_grid:
            if exposure_index is not None:
                raise ValueError('Fiducial grid not supported for individual exposures.')
            loglam = fiducial_loglam
            # Locate where this file's native grid starts on the fiducial grid.
            first_index = float(get_fiducial_pixel_index(10.0**hdu['loglam'][0]))
            if abs(first_index - round(first_index)) > 0.01:
                raise RuntimeError('Wavelength grid not aligned with fiducial grid.')
            first_index = int(round(first_index))
            # Slice of the fiducial grid covered by this file's native pixels.
            trimmed = slice(first_index, first_index + len(pixel_bits))
        else:
            loglam = hdu['loglam'][:]
            trimmed = slice(None)
        num_pixels = len(loglam)
        # Apply the pixel quality mask, if any.
        if pixel_quality_mask is not None:
            clear_allowed = np.bitwise_not(np.uint32(pixel_quality_mask))
            pixel_bits = np.bitwise_and(pixel_bits, clear_allowed)
        # Identify the pixels with valid data.
        ivar = hdu['ivar'][:]
        bad_pixels = (pixel_bits != 0) | (ivar <= 0.0)
        good_pixels = ~bad_pixels
        # Create and fill the unmasked structured array of data.
        dtype = [('loglam' if use_loglam else 'wavelength', np.float32),
                 ('flux', np.float32), ('ivar' if use_ivar else 'dflux', np.float32)]
        if include_wdisp:
            dtype.append(('wdisp', np.float32))
        if include_sky:
            dtype.append(('sky', np.float32))
        data = np.zeros(num_pixels, dtype=dtype)
        if use_loglam:
            data['loglam'][:] = loglam
        else:
            data['wavelength'][:] = np.power(10.0, loglam)
        # Native-grid values are written into the trimmed slice of the output.
        data['flux'][trimmed][:] = hdu['flux'][:]
        if use_ivar:
            data['ivar'][trimmed][good_pixels] = ivar[good_pixels]
        else:
            # Convert inverse variance to a 1-sigma error for valid pixels only.
            data['dflux'][trimmed][good_pixels] = 1.0 / np.sqrt(ivar[good_pixels])
        if include_wdisp:
            data['wdisp'][trimmed] = hdu['wdisp'][:]
        if include_sky:
            data['sky'][trimmed] = hdu['sky'][:]
        if fiducial_grid:
            # Fiducial pixels outside the native grid stay masked.
            mask = np.ones(num_pixels, dtype=bool)
            mask[trimmed][:] = bad_pixels
        else:
            mask = bad_pixels
        result = numpy.ma.MaskedArray(data, mask=mask)
        # Wavelength values are always valid.
        result['loglam' if use_loglam else 'wavelength'].mask = False
        return result
| StarcoderdataPython |
1718851 | <gh_stars>0
# Package version string.
VERSION='0.8.1'
| StarcoderdataPython |
1661206 | import os
import MaxwellConstruction as mx
import numpy as np
import matplotlib.pyplot as plt
import argparse
def run_pr_case( a_eos, b_eos, w_eos, sigma, beta, TrList ):
    """Run Maxwell-construction cases for a Peng-Robinson equation of state.

    For each reduced temperature in *TrList* a case directory is created from
    the ``Base`` template, the EOS constants and coexistence densities are
    substituted into the templates with ``sed``, and the case is executed.

    Args:
        a_eos, b_eos, w_eos: Peng-Robinson equation-of-state constants.
        sigma: sigma constant substituted into the Allrun script.
        beta: mixed interaction-force constant.
        TrList: iterable of reduced temperatures to simulate.
    """
    # Case directory (wiped and recreated for a clean run).
    main_dir = os.getcwd()
    cases_dir = main_dir + '/a_{:.4f}_b_{:.4f}_w_{:.4f}_sigma_{:.4f}_beta_{:.4f}/'.format(a_eos, b_eos, w_eos, sigma, beta)
    os.system('rm -rf ' + cases_dir )
    os.system('mkdir -p ' + cases_dir )
    # Equation of state.
    prob = mx.EOS('Peng-Robinson',a=a_eos,b=b_eos,w=w_eos)
    # One run per reduced temperature.
    for Tr in TrList:
        # Working directory for this case.
        os.chdir( cases_dir )
        case_name = 'Tr_{:.3f}'.format(Tr)
        os.system('cp -r ../Base ' + case_name )
        os.chdir( cases_dir + '/' + case_name)
        # Coexistence properties (finer step and wider volume range at low Tr).
        step = 0.999
        if Tr < 0.6:
            step = 0.9999
        if Tr >= 0.8:
            Vrmin,Vrmax = mx.coexistencia(prob, Tr, plotPV=False, Vspace=(0.3,50,10000), step_size=step)
        else:
            Vrmin,Vrmax = mx.coexistencia(prob, Tr, plotPV=False, Vspace=(0.28,4000,200000), step_size=step)
        # Substitute properties into the case templates.
        # BUG FIX: these substitutions previously read from the global
        # argparse namespace (args.sigma, args.a, ...) instead of the function
        # parameters, so calling run_pr_case with explicit values silently
        # used the command-line values instead.
        os.system('sed -i \'s/sigmaReplace/{:.5g}/g\' Allrun'.format(sigma))
        os.system('sed -i \'s/beta_pr_replace/{:.5g}/g\' properties/macroProperties'.format(beta))
        os.system('sed -i \'s/a_pr_replace/{:.5g}/g\' properties/macroProperties'.format(a_eos))
        os.system('sed -i \'s/b_pr_replace/{:.5g}/g\' properties/macroProperties'.format(b_eos))
        os.system('sed -i \'s/w_pr_replace/{:.5g}/g\' properties/macroProperties'.format(w_eos))
        os.system('sed -i \'s/T_pr_replace/{:.7g}/g\' start/initialFields'.format(Tr*prob.Tc()))
        os.system('sed -i \'s/rhomin_pr_replace/{:.7g}/g\' start/initialFields'.format(prob.rhoc()/Vrmax))
        os.system('sed -i \'s/rhomax_pr_replace/{:.7g}/g\' start/initialFields'.format(prob.rhoc()/Vrmin))
        # Execute the case.
        print('Tr = {}'.format(Tr))
        os.system('./Allclean > log.Allclean')
        os.system('./Allpre > log.Allpre')
        os.system('./Allrun > log.lbm')
        os.chdir(main_dir)
if __name__ == "__main__":
    # Console arguments (help texts are runtime strings, kept as-is).
    parser = argparse.ArgumentParser(description='Resolución del problema de construcción de Maxwell para diferentes constantes')
    parser.add_argument('-a', help='Constante a de PR', type=float, default = 1./50.)
    parser.add_argument('-b', help='Constante b de PR', type=float, default = 2./21.)
    parser.add_argument('-w', help='Constante w de PR', type=float, default = 0.5)
    parser.add_argument('-beta', help='Constante beta de fuerza de interaccion mixta', type=float, default = 1.25)
    parser.add_argument('-sigma', help='Constante sigma', type=float, default = 0.125)
    args = parser.parse_args()
    # Run the Maxwell-construction cases over a sweep of reduced temperatures.
    Tr = [0.95, 0.90, 0.85, 0.80, 0.75, 0.70, 0.65]
    run_pr_case( args.a, args.b, args.w, args.sigma, args.beta, Tr )
| StarcoderdataPython |
3275705 | from rest_framework import serializers
from presentation.models import Follower
class FollowerSerializer(serializers.ModelSerializer):
    # DRF model serializer for Follower, exposing only the listed fields.
    class Meta:
        model = Follower
        fields = ['type', 'owner', 'items']
| StarcoderdataPython |
6505449 | #!/usr/bin/env python
import time
import asyncio
import logging
from typing import (
Any,
AsyncIterable,
List,
Optional,
)
from hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource
from hummingbot.logger import HummingbotLogger
from .peatio_constants import Constants
from .peatio_auth import PeatioAuth
from .peatio_websocket import PeatioWebsocket
class PeatioAPIUserStreamDataSource(UserStreamTrackerDataSource):
    """User stream data source for Peatio: subscribes to the authenticated
    orders/trades websocket channel and forwards incoming messages."""

    _logger: Optional[HummingbotLogger] = None

    @classmethod
    def logger(cls) -> HummingbotLogger:
        # Lazily create a single logger shared by all instances of this class.
        if cls._logger is None:
            cls._logger = logging.getLogger(__name__)
        return cls._logger

    def __init__(self, peatio_auth: PeatioAuth, trading_pairs: Optional[List[str]] = None):
        # BUG FIX: the original default was a mutable list (``= []``), shared
        # across all calls; use None as the sentinel and normalize here.
        self._peatio_auth: PeatioAuth = peatio_auth
        self._trading_pairs = trading_pairs if trading_pairs is not None else []
        self._current_listen_key = None
        self._listen_for_user_stream_task = None
        # Timestamp (time.time()) of the most recent websocket message.
        self._last_recv_time: float = 0
        self._ws: PeatioWebsocket = None
        super().__init__()

    @property
    def last_recv_time(self) -> float:
        return self._last_recv_time

    @property
    def is_connected(self):
        return self._ws.is_connected if self._ws is not None else False

    async def _listen_to_orders_trades_balances(self) -> AsyncIterable[Any]:
        """
        Subscribe to active orders via web socket
        """
        # NOTE: ``except Exception as e: raise e`` was removed — it only
        # truncated the traceback; try/finally alone has the same behavior.
        try:
            self._ws = PeatioWebsocket(self._peatio_auth)
            await self._ws.connect()
            await self._ws.subscribe(Constants.WS_SUB["USER_ORDERS_TRADES"])
            async for msg in self._ws.on_message():
                self._last_recv_time = time.time()
                if msg is not None:
                    yield msg
        finally:
            # Always tear down the socket and back off briefly, even on error.
            await self._ws.disconnect()
            await asyncio.sleep(5)

    async def listen_for_user_stream(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue) -> AsyncIterable[Any]:
        """
        *required
        Subscribe to user stream via web socket, and keep the connection open for incoming messages
        :param ev_loop: ev_loop to execute this function in
        :param output: an async queue where the incoming messages are stored
        """
        while True:
            try:
                async for msg in self._listen_to_orders_trades_balances():
                    output.put_nowait(msg)
            except asyncio.CancelledError:
                raise
            except Exception:
                self.logger().error(
                    f"Unexpected error with {Constants.EXCHANGE_NAME} WebSocket connection. "
                    "Retrying after 30 seconds...", exc_info=True)
                await asyncio.sleep(30.0)
| StarcoderdataPython |
3459121 | <reponame>zhenglab/cxxnet<filename>example/MNIST/mnist.py<gh_stars>1-10
import sys
# Make the local cxxnet Python wrapper importable (script assumes it is run
# from example/MNIST inside the cxxnet source tree).
sys.path.append('../../wrapper/')
import cxxnet
import numpy as np
# Training data iterator (config strings are parsed by cxxnet; keep verbatim).
data = cxxnet.DataIter("""
iter = mnist
path_img = "./data/train-images-idx3-ubyte.gz"
path_label = "./data/train-labels-idx1-ubyte.gz"
shuffle = 1
iter = end
input_shape = 1,1,784
batch_size = 100
""")
print 'init data iter'
# Evaluation data iterator over the MNIST test set.
deval = cxxnet.DataIter("""
iter = mnist
path_img = "./data/t10k-images-idx3-ubyte.gz"
path_label = "./data/t10k-labels-idx1-ubyte.gz"
iter = end
input_shape = 1,1,784
batch_size = 100
""")
# Network definition: 784 -> 100 (sigmoid) -> 10 with softmax output.
cfg = """
netconfig=start
layer[+1:fc1] = fullc:fc1
  nhidden = 100
  init_sigma = 0.01
layer[+1:sg1] = sigmoid:se1
layer[sg1->fc2] = fullc:fc2
  nhidden = 10
  init_sigma = 0.01
layer[+0] = softmax
netconfig=end
input_shape = 1,1,784
batch_size = 100
random_type = gaussian
"""
# Trainer hyper-parameters.
param = {}
param['eta'] = 0.1
param['dev'] = 'cpu'
param['momentum'] = 0.9
param['metric[label]'] = 'error'
# Train for one round, evaluating on the test iterator.
net = cxxnet.train(cfg, data, 1, param, eval_data = deval)
# Snapshot the trained weights so they can be restored later in the script.
weights = []
for layer in ['fc1', 'fc2']:
    for tag in ['wmat', 'bias']:
        weights.append((layer, tag, net.get_weight(layer, tag)))
data.before_first()
data.next()
# extract
print 'predict'
pred = net.predict(data)
print 'predict finish'
dbatch = data.get_data()
print dbatch.shape
print 'get data'
# Predicting from the iterator and from the raw batch should agree exactly.
pred2 = net.predict(dbatch)
print np.sum(np.abs(pred - pred2))
print np.sum(np.abs(net.extract(data, 'sg1') - net.extract(dbatch, 'sg1')))
# evaluate
deval.before_first()
werr = 0
wcnt = 0
while deval.next():
    label = deval.get_label()
    pred = net.predict(deval)
    werr += np.sum(label[:,0] != pred[:])
    wcnt += len(label[:,0])
print 'eval-error=%f' % (float(werr) / wcnt)
# training
data.before_first()
while data.next():
    label = data.get_label()
    batch = data.get_data()
    net.update(batch, label)
# evaluate
deval.before_first()
werr = 0
wcnt = 0
while deval.next():
    label = deval.get_label()
    pred = net.predict(deval)
    werr += np.sum(label[:,0] != pred[:])
    wcnt += len(label[:,0])
print 'eval-error2=%f' % (float(werr) / wcnt)
# Restore the snapshotted weights taken right after the initial training.
for layer, tag, wt in weights:
    net.set_weight(wt, layer, tag)
# evaluate
deval.before_first()
werr = 0
wcnt = 0
while deval.next():
label = deval.get_label()
pred = net.predict(deval)
werr += np.sum(label[:,0] != pred[:])
wcnt += len(label[:,0])
print 'eval-error-after-setback=%f' % (float(werr) / wcnt)
| StarcoderdataPython |
6540940 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - <NAME>, <EMAIL>, 2017-2022
import logging
import os
import traceback
from pilot.common.exception import FileHandlingFailure
from pilot.util import https
from pilot.util.config import config
from pilot.util.container import execute
from pilot.util.filehandling import write_file
logger = logging.getLogger(__name__)
def get_distinguished_name():
    """
    Get the user DN.

    Tries ``arcproxy -i subject`` first; on any failure it falls back to
    ``voms-proxy-info -subject``. The "/CN=proxy" suffix is appended when
    missing.
    Note: the DN is also sent by the server to the pilot in the job description (produserid).
    :return: User DN (string), empty string if neither tool produced one.
    """
    dn = ""
    executable = 'arcproxy -i subject'
    exit_code, stdout, stderr = execute(executable)
    if exit_code != 0 or "ERROR:" in stderr:
        logger.warning("arcproxy failed: ec=%d, stdout=%s, stderr=%s" % (exit_code, stdout, stderr))
        if "command not found" in stderr or "Can not find certificate file" in stderr:
            # This branch only adds a more specific log line; the fallback
            # below runs for *any* arcproxy failure.
            logger.warning("arcproxy experienced a problem (will try voms-proxy-info instead)")
        # Default to voms-proxy-info
        executable = 'voms-proxy-info -subject'
        exit_code, stdout, stderr = execute(executable)
    if exit_code == 0:
        # NOTE(review): assumes execute() returns stdout already stripped of
        # the trailing newline, otherwise the endswith() check below can
        # never match -- TODO confirm.
        dn = stdout
        logger.info('DN = %s' % dn)
        cn = "/CN=proxy"
        if not dn.endswith(cn):
            logger.info("DN does not end with %s (will be added)" % cn)
            dn += cn
    else:
        logger.warning("user=self set but cannot get proxy: %d, %s" % (exit_code, stdout))
    return dn
def get_proxy(proxy_outfile_name, voms_role):
    """
    Download a VOMS proxy for the given role from the PanDA server and
    store its contents in a file with secure (0600) permissions.

    :param proxy_outfile_name: specify the file to store proxy (string).
    :param voms_role: what proxy (role) to request, e.g. 'atlas' (string).
    :return: True on success (Boolean).
    """
    try:
        # it assumes that https_setup() was done already
        url = os.environ.get('PANDA_SERVER_URL', config.Pilot.pandaserver)
        res = https.request('{pandaserver}/server/panda/getProxy'.format(pandaserver=url), data={'role': voms_role})
        if res is None:
            logger.error(f"unable to get proxy with role '{voms_role}' from panda server")
            return False
        if res['StatusCode'] != 0:
            logger.error(f"panda server returned: \'{res['errorDialog']}\' for proxy role \'{voms_role}\'")
            return False
        proxy_contents = res['userProxy']
    except Exception as exc:
        logger.error(f"Get proxy from panda server failed: {exc}, {traceback.format_exc()}")
        return False
    res = False
    try:
        # Pre-create an empty proxy file with secure permissions; write_file()
        # cannot set a file mode itself, so it writes into the already
        # created file, keeping the 0600 permissions intact.
        _file = os.open(proxy_outfile_name, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
        os.close(_file)
        res = write_file(proxy_outfile_name, proxy_contents, mute=False)  # returns True on success
    except (IOError, OSError, FileHandlingFailure) as exc:
        logger.error(f"exception caught:\n{exc},\ntraceback: {traceback.format_exc()}")
    return res
def create_cert_files(from_proxy, workdir):
    """
    Create cert/key pem files from given proxy and store in workdir.
    These files are needed for communicating with logstash server.

    :param from_proxy: path to proxy file (string).
    :param workdir: work directory (string).
    :return: path to crt.pem (string), path to key.pem (string) -- or a
        pair of empty strings when an openssl command fails.
    """
    cert_file = os.path.join(workdir, 'crt.pem')
    key_file = os.path.join(workdir, 'key.pem')
    # Reuse the pem files if both already exist in the work directory.
    if os.path.exists(cert_file) and os.path.exists(key_file):
        return cert_file, key_file
    extractions = (
        (f'openssl pkcs12 -in {from_proxy} -out {cert_file} -clcerts -nokeys', cert_file),
        (f'openssl pkcs12 -in {from_proxy} -out {key_file} -nocerts -nodes', key_file),
    )
    for cmd, produced in extractions:
        exit_code, stdout, stderr = execute(cmd)
        if exit_code:
            logger.warning(f'cert command failed: {stdout}, {stderr}')
            return '', ''
        logger.debug(f'produced key/cert file: {produced}')
    return cert_file, key_file
| StarcoderdataPython |
from utils import *
from Sprint import Sprint
from stasis.Singleton import get as db
from stasis.ActiveRecord import ActiveRecord, link
class Group(ActiveRecord):
    """A named, ordered group of tasks within a sprint.

    Persisted through the stasis ActiveRecord layer; ``seq`` controls the
    display order of the groups inside their sprint.
    """
    sprint = link(Sprint, 'sprintid')
    def __init__(self, sprintid, name, seq = None, deletable = True, id = None):
        # When no sequence is given, append after the sprint's current
        # highest group sequence.
        ActiveRecord.__init__(self)
        self.id = id
        self.sprintid = sprintid
        self.name = name
        self.seq = seq if seq else maxOr(group.seq for group in self.sprint.getGroups()) + 1
        self.deletable = to_bool(deletable)
    def getTasks(self):
        """Return the sprint's tasks that belong to this group."""
        return filter(lambda t: t.group and t.group == self, self.sprint.getTasks())
    def save(self):
        """Persist the group; on first save, shift later groups up by one."""
        if not self.id:
            # Shift everything after this sequence
            for id, group in db()['groups'].iteritems():
                if group['sprintid'] == self.sprintid and group['seq'] >= self.seq:
                    with db()['groups'].change(id) as data:
                        data['seq'] += 1
        return ActiveRecord.save(self)
    def move(self, newSeq):
        """Reposition this group at ``newSeq``; a falsy value only compacts
        the remaining sequence numbers (used by delete())."""
        # Remove group from the list
        for id, group in db()['groups'].iteritems():
            if group['sprintid'] == self.sprintid and group['seq'] > self.seq:
                with db()['groups'].change(id) as data:
                    data['seq'] -= 1
        # Insert it at the new spot
        if newSeq:
            self.seq = newSeq
            for id, group in db()['groups'].iteritems():
                if group['sprintid'] == self.sprintid and group['seq'] >= self.seq:
                    with db()['groups'].change(id) as data:
                        data['seq'] += 1
        self.save()
    def delete(self):
        """Compact sequence numbers, then remove the record."""
        self.move(None)
        return ActiveRecord.delete(self)
    def __str__(self):
        return self.safe.name
| StarcoderdataPython |
1778655 | <gh_stars>0
"""Args:
param1 (int): byte as int value in binary
Returns:
True if input indicates a sys_ex event, False if otherwise.
"""
def is_sys_ex(n):
# hex status bytes 0xF0 and 0xF7
# dec values between 240 and 247
dec_val = int(n, 2)
if dec_val >= 240 or dec_val <= 247:
return True
else:
return False
| StarcoderdataPython |
6636568 | <filename>qiskit_metal/_gui/endcap_q3d_ui.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'endcap_q3d_ui.ui',
# licensing of 'endcap_q3d_ui.ui' applies.
#
# Created: Thu Jan 7 16:03:08 2021
# by: pyside2-uic running on PySide2 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_mainWindow(object):
    """UI builder for the "Q3D Endcaps" window.

    Auto-generated by pyside2-uic from 'endcap_q3d_ui.ui' (see header above);
    regenerate from the .ui file instead of editing this class by hand.
    """
    def setupUi(self, mainWindow):
        """Create and lay out all widgets on ``mainWindow`` and wire signals."""
        mainWindow.setObjectName("mainWindow")
        mainWindow.resize(322, 530)
        self.centralwidget = QtWidgets.QWidget(mainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Prompt label shown above the endcap table.
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(10, 10, 271, 20))
        self.label.setObjectName("label")
        self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(100, 450, 131, 32))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.renderButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.renderButton.setObjectName("renderButton")
        self.verticalLayout.addWidget(self.renderButton)
        # Table listing the unconnected pins and their endcap type choices.
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setGeometry(QtCore.QRect(10, 30, 301, 421))
        self.tableWidget.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustIgnored)
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(0)
        self.tableWidget.setRowCount(0)
        mainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar()
        self.menubar.setGeometry(QtCore.QRect(0, 0, 322, 22))
        self.menubar.setObjectName("menubar")
        mainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(mainWindow)
        self.statusbar.setObjectName("statusbar")
        mainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(mainWindow)
        # The Render button triggers mainWindow.render_everything().
        QtCore.QObject.connect(self.renderButton, QtCore.SIGNAL("clicked()"), mainWindow.render_everything)
        QtCore.QMetaObject.connectSlotsByName(mainWindow)
    def retranslateUi(self, mainWindow):
        """Assign all translatable user-visible strings."""
        mainWindow.setWindowTitle(QtWidgets.QApplication.translate("mainWindow", "Q3D Endcaps", None, -1))
        self.label.setText(QtWidgets.QApplication.translate("mainWindow", "Select endcap type for unconnected pins:", None, -1))
        self.renderButton.setText(QtWidgets.QApplication.translate("mainWindow", "Render", None, -1))
1910911 | import os
import dj_database_url
from .common import *
# Production settings
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Allowed hosts can be set via the ALLOWED_HOSTS environment variable. It is
# required that the variable is set to either a single host, or a comma-
# separated string of hosts (defaults to "localhost").
ALLOWED_HOSTS = os.environ.get("ALLOWED_HOSTS", "localhost").split(",")
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# For more info see: https://github.com/jacobian/dj-database-url
# Connection parameters are parsed from the DATABASE_URL environment variable.
DATABASES = {"default": dj_database_url.config()}
# WhiteNoise settings
# http://whitenoise.evans.io/en/stable/index.html
# Serve static files directly from the app process in production.
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
| StarcoderdataPython |
3488078 | # imports required
from django.shortcuts import render,redirect
from django.contrib.auth import authenticate,login,logout
from django.contrib.auth.models import User
from django.contrib import messages
from app.models import *
from datetime import datetime, timedelta
from django.contrib.auth.decorators import login_required
# home page
def index(request):
    """Home page: render the full product catalogue."""
    products = Product.objects.all()
    return render(request, "index.html", {'products': products})
# users urls
# login page
def login_page(request):
    """Render the login form template."""
    return render(request,"login.html",{})
# login function
def login_view(request):
    """Authenticate the user from POSTed credentials and log them in.

    Redirects to the home page on success, and back to the login page with
    an error message otherwise.
    """
    # .get() avoids a 500 (MultiValueDictKeyError) when a field is missing,
    # e.g. on a direct GET request; authenticate() then simply returns None.
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(username=username, password=password)
    if user is not None:
        login(request, user)
        return redirect("app:index")
    else:
        messages.info(request, "username or password incorrect[login failed]")
        return redirect("app:login_page")
# logout function
@login_required()
def logout_view(request):
    """Log the current user out and return to the home page with a notice."""
    logout(request)
    messages.info(request,"[logout success]")
    return redirect("app:index")
# signup page
def signup_page(request):
    """Show the signup form; already-authenticated users get the home template."""
    if request.user.is_authenticated:
        return render(request, "index.html", {})
    return render(request, "signup.html", {})
# signup function
def signup_view(request):
    """Create a new user account from the signup form.

    Validates username/email uniqueness and that both password fields match,
    then creates the user and redirects to the login page.

    NOTE(review): the '<PASSWORD>' placeholders below are anonymization
    artifacts of the published source (they are not valid Python); the
    original presumably read the two password fields from request.POST --
    restore before use.
    """
    if request.method=='POST'and request.FILES['picture']:
        username=request.POST['username']
        email=request.POST['email']
        password1=request.POST['<PASSWORD>']
        password2=request.POST['<PASSWORD>']
        image=request.FILES['picture']
        slug=request.POST['slug']
        if User.objects.filter(username=username):
            messages.info(request,"faild ..user already exists")
            return redirect("app:signup_page")
        elif User.objects.filter(email=email):
            messages.info(request,"failed...email already taken")
            return redirect("app:signup_page")
        elif password1!=password2:
            messages.info(request,"failed...password not mached")
            return redirect("app:signup_page")
        else:
            user=User.objects.create_user(username=username,email=email,password=<PASSWORD>,picture=image,slug=slug)
            user.save();
            messages.info(request,"signup successful")
            return redirect("app:login_page")
    else:
        return redirect("app:signup_page")
# reset page
def reset_page(request):
    """Render the password-reset form."""
    return render(request,"reset.html")
# reset function
def reset_password_view(request):
    """Set a new password for the account matching the posted email.

    SECURITY NOTE(review): this endpoint lets anyone reset any account's
    password knowing only the email address -- there is no token or
    ownership check. Django's built-in password-reset flow should be used
    instead.
    """
    if request.method=="POST":
        email=request.POST['email']
        password=request.POST['password']
        res=User.objects.filter(email=email)
        if res:
            user=User.objects.get(email=email)
            user.set_password(password)
            user.save();
            messages.info(request,"password reset successful")
            return redirect("app:login_page")
        else:
            messages.info(request,"email not matched")
            return redirect("app:reset_page")
    else:
        return redirect("app:reset_page")
@login_required()
def update_profile_page(request):
    """Render the profile-edit form for the logged-in user."""
    return render(request,"update_profile_page.html")
@login_required()
def update_profile(request, slug):
    """Update the profile fields of the user identified by ``slug``.

    Expects a POST with username/email/phone/address fields and an uploaded
    'picture' file; redirects to the home page afterwards.
    """
    user = User.objects.get(slug=slug)
    if request.method == 'POST' and request.FILES['picture']:
        username = request.POST['username']
        email = request.POST['email']
        phone = request.POST['phone']
        address = request.POST['address']
        image = request.FILES['picture']
        # BUG FIX: the original did ``username=username`` (a no-op local
        # self-assignment), so the new username was never stored.
        user.username = username
        user.email = email
        user.phone = phone
        user.address = address
        user.picture = image
        user.save()
    # Always return a response (the original returned None on non-POST,
    # which makes Django raise a 500).
    return redirect("app:index")
def save_contact(request):
    """Persist a contact-form submission, then return to the home page."""
    if request.method == 'POST':
        field_names = ('firstname', 'lastname', 'email', 'phone', 'address')
        values = {name: request.POST[name] for name in field_names}
        contact = Contacts.objects.create(**values)
        # objects.create() already persisted the row; the extra save() is
        # kept for exact parity with the previous implementation.
        contact.save()
        return redirect("app:index")
@login_required()
def detail(request, id):
    """Render the detail page for a single product."""
    product = Product.objects.get(id=id)
    return render(request, "detail.html", {'product': product})
@login_required()
def add_cart(request, id):
    """Put a product in the current user's cart, replacing any existing entry."""
    product = Product.objects.get(id=id)
    customer = request.user
    existing = Cart.objects.filter(product=product, customer=customer)
    if existing:
        existing.delete()
    Cart(product=product, customer=customer).save()
    return redirect("app:view_cart")
@login_required()
def view_cart(request):
    """Render the current user's cart contents."""
    return render(request, "cart_detail.html")
@login_required()
def add_order(request, id):
    """Create an order for the product and drop it from the user's cart."""
    product = Product.objects.get(id=id)
    customer = request.user
    Order(product=product, customer=customer).save()
    cart_entries = Cart.objects.filter(product=product, customer=customer)
    if cart_entries:
        cart_entries.delete()
    return redirect("app:view_order")
@login_required()
def view_order(request):
    """Render the current user's orders."""
    return render(request, "order_detail.html")
@login_required()
def remove_order(request, id):
    """Delete a single order by primary key, then show the order list."""
    order = Order.objects.get(id=id)
    order.delete()
    return redirect("app:view_order")
@login_required()
def remove_cart(request, id):
    """Delete a single cart entry by primary key, then show the cart."""
    entry = Cart.objects.get(id=id)
    entry.delete()
    return redirect("app:view_cart")
| StarcoderdataPython |
3226618 | #!/usr/bin/env python
from __future__ import print_function
import cx_Oracle
import datetime
import calendar
import sys
import logging
import CondCore.Utilities.conddb_serialization_metadata as sm
import CondCore.Utilities.credentials as auth
import CondCore.Utilities.conddb_time as conddb_time
import os
# Environment variable naming the directory that holds the DB credential files.
authPathEnvVar = 'COND_AUTH_PATH'
# Each service is (TNS alias, {access type ('r'/'w') -> account string}).
prod_db_service = ('cms_orcon_prod',{'w':'cms_orcon_prod/cms_cond_general_w','r':'cms_orcon_prod/cms_cond_general_r'})
adg_db_service = ('cms_orcon_adg',{'r':'cms_orcon_adg/cms_cond_general_r'})
dev_db_service = ('cms_orcoff_prep',{'w':'cms_orcoff_prep/cms_cond_general_w','r':'cms_orcoff_prep/cms_cond_general_r'})
# Oracle schema owning the conditions tables.
schema_name = 'CMS_CONDITIONS'
# Console logging format and level shared by the tool.
fmt_str = "[%(asctime)s] %(levelname)s: %(message)s"
logLevel = logging.INFO
logFormatter = logging.Formatter(fmt_str)
def print_table( headers, table ):
    """Print `headers` and `table` rows as left-aligned, space-padded columns.

    Column widths are the maximum rendered (str) width of the header and of
    every cell in that column; extra row cells beyond the header count are
    ignored when computing widths. A dashed separator is printed between
    the headers and the rows.

    :param headers: sequence of column header strings.
    :param table: sequence of rows, each a sequence of printable cells.
    """
    ws = [len(h) for h in headers]
    for row in table:
        for ind, c in enumerate(row):
            # Measure the printed representation, exactly as printf renders it.
            if ind < len(ws):
                ws[ind] = max(ws[ind], len(str(c)))

    def printf(row):
        # Fix: str() every cell before formatting -- the width pass above
        # measured str(c), and '{:<N}'.format(None) raises TypeError for
        # non-string cells such as None.
        line = ''
        for ind, w in enumerate(ws):
            line += '{:<{width}} '.format(str(row[ind]), width=w)
        print(line)

    printf(headers)
    hsep = ''
    for w in ws:
        hsep += '{:-<{width}} '.format('', width=w)
    print(hsep)
    for row in table:
        printf(row)
class version_db(object):
    """Bookkeeping for serialization (boost) versions.

    Caches two DB tables -- CMSSW_BOOST_MAP (CMSSW release -> boost version)
    and BOOST_RUN_MAP (run-range -> boost version) -- and resolves the boost
    version shipped with a CMSSW release from the release area on disk when
    it is not cached yet.
    """
    def __init__(self, db ):
        # db: an open cx_Oracle connection with current_schema already set.
        self.db = db
        self.cmssw_boost_map = {}
        self.boost_run_map = []
    def fetch_cmssw_boost_map( self ):
        """Load and return the {CMSSW release: boost version} map."""
        cursor = self.db.cursor()
        cursor.execute('SELECT BOOST_VERSION, CMSSW_VERSION FROM CMSSW_BOOST_MAP');
        rows = cursor.fetchall()
        self.cmssw_boost_map = {}
        for r in rows:
            self.cmssw_boost_map[r[1]]=r[0]
        return self.cmssw_boost_map
    def fetch_boost_run_map( self ):
        """Load and return the ordered (run, start time, boost version, insertion time) list."""
        cursor = self.db.cursor()
        cursor.execute('SELECT RUN_NUMBER, RUN_START_TIME, BOOST_VERSION, INSERTION_TIME FROM BOOST_RUN_MAP ORDER BY RUN_NUMBER, INSERTION_TIME')
        rows = cursor.fetchall()
        self.boost_run_map = []
        for r in rows:
            self.boost_run_map.append( (r[0],r[1],r[2],str(r[3])) )
        return self.boost_run_map
    def insert_boost_run_range( self, run, boost_version, min_ts ):
        """Insert a BOOST_RUN_MAP row for `boost_version` starting at `run`.

        The run start time is looked up in RUN_INFO for the first run >= `run`;
        when no such run exists, `min_ts` (a timestamp string) is converted
        and used instead. The caller is responsible for committing.
        """
        cursor = self.db.cursor()
        cursor.execute('SELECT MIN(RUN_NUMBER) FROM RUN_INFO WHERE RUN_NUMBER >= :RUN',(run,))
        res = cursor.fetchone()
        if res is not None and res[0] is not None:
            min_run = res[0]
            cursor.execute('SELECT START_TIME FROM RUN_INFO WHERE RUN_NUMBER=:RUN',(min_run,))
            min_run_time = cursor.fetchone()[0]
            # Conditions Time-type timestamps pack epoch seconds in the upper 32 bits.
            min_run_ts = calendar.timegm( min_run_time.utctimetuple() ) << 32
        else:
            min_run = run
            min_run_ts = conddb_time.string_to_timestamp(min_ts)
        now = datetime.datetime.utcnow()
        cursor.execute('INSERT INTO BOOST_RUN_MAP ( RUN_NUMBER, RUN_START_TIME, BOOST_VERSION, INSERTION_TIME ) VALUES (:RUN, :RUN_START_T, :BOOST, :TIME)',(run,min_run_ts,boost_version,now) )
    def insert_cmssw_boost( self, cmssw_version,boost_version ):
        """Persist one CMSSW release -> boost version association."""
        cursor = self.db.cursor()
        cursor.execute('INSERT INTO CMSSW_BOOST_MAP ( CMSSW_VERSION, BOOST_VERSION ) VALUES ( :CMSSW_VERSION, :BOOST_VERSION )',(cmssw_version,boost_version))
    def lookup_boost_in_cmssw( self, cmssw_version ):
        """Return the boost version used by `cmssw_version` (or None).

        Release cycles are resolved to a concrete installed release on disk,
        the in-memory cache is consulted first, and any freshly discovered
        association is written back to CMSSW_BOOST_MAP.
        """
        cmssw_v = sm.check_cmssw_version( cmssw_version )
        the_arch = None
        releaseRoot = None
        if sm.is_release_cycle( cmssw_v ):
            cmssw_v = sm.strip_cmssw_version( cmssw_v )
            archs = sm.get_production_arch( cmssw_v )
            for arch in archs:
                path = sm.get_release_root( cmssw_v, arch )
                if os.path.exists(os.path.join(path,cmssw_v)):
                    releaseRoot = path
                    the_arch = arch
                    break
            if releaseRoot is None:
                # No exact installation: pick the latest release matching the cycle prefix.
                for arch in archs:
                    the_arch = arch
                    releaseRoot = sm.get_release_root( cmssw_v, arch )
                    for r in sorted (os.listdir( releaseRoot )):
                        if r.startswith(cmssw_v):
                            cmssw_v = r
            logging.debug('Boost version will be verified in release %s' %cmssw_v)
        if cmssw_v in self.cmssw_boost_map.keys():
            return self.cmssw_boost_map[cmssw_v]
        if releaseRoot is None:
            archs = sm.get_production_arch( cmssw_v )
            for arch in archs:
                path = sm.get_release_root( cmssw_v, arch )
                if os.path.exists(os.path.join(path,cmssw_v)):
                    releaseRoot = path
                    the_arch = arch
                    break
        logging.debug('Release path: %s' %releaseRoot)
        boost_version = sm.get_cmssw_boost( the_arch, '%s/%s' %(releaseRoot,cmssw_v) )
        if not boost_version is None:
            self.cmssw_boost_map[cmssw_v] = boost_version
            self.insert_cmssw_boost( cmssw_v,boost_version )
        return boost_version
    def populate_for_gts( self ):
        """Resolve (and cache) the boost version of every release referenced by a GT."""
        cursor = self.db.cursor()
        cursor.execute('SELECT DISTINCT(RELEASE) FROM GLOBAL_TAG')
        rows = cursor.fetchall()
        for r in rows:
            self.lookup_boost_in_cmssw( r[0] )
class conddb_tool(object):
    """Command-line tool driver for conditions serialization metadata.

    Connects to a conditions database, computes/updates the minimum boost
    (serialization) version stored per tag, validates tags against the
    Global Tags that reference them, and maintains the boost/run map.
    """
    def __init__( self ):
        self.db = None
        self.version_db = None
        self.args = None
        self.logger = logging.getLogger()
        self.logger.setLevel(logLevel)
        consoleHandler = logging.StreamHandler(sys.stdout)
        consoleHandler.setFormatter(logFormatter)
        self.logger.addHandler(consoleHandler)
        # Per-tag IOV cache: list of (since, insertion_time, streamer_info).
        self.iovs = None
        # (since, boost version) pairs marking where the version changes.
        self.versionIovs = None
    def connect( self ):
        """Open the Oracle connection selected by self.args.db / accessType."""
        if self.args.db is None:
            self.args.db = 'pro'
        if self.args.db == 'dev' or self.args.db == 'oradev' :
            db_service = dev_db_service
        elif self.args.db == 'orapro':
            db_service = adg_db_service
        # NOTE(review): this condition is always true (a value cannot equal
        # both 'onlineorapro' and 'pro'), so every unknown db name falls
        # through to the production service and the raise below is
        # unreachable (which also hides a latent `args.db` NameError there).
        # Behaviour kept as-is; flagged for a deliberate follow-up fix.
        elif self.args.db != 'onlineorapro' or self.args.db != 'pro':
            db_service = prod_db_service
        else:
            raise Exception("Database '%s' is not known." %args.db )
        if self.args.accessType not in db_service[1].keys():
            raise Exception('The specified database connection %s does not support the requested action.' %db_service[0])
        service = db_service[1][self.args.accessType]
        creds = auth.get_credentials( authPathEnvVar, service, self.args.auth )
        if creds is None:
            raise Exception("Could not find credentials for service %s" %service)
        (username, account, pwd) = creds
        connStr = '%s/%s@%s' %(username,pwd,db_service[0])
        self.db = cx_Oracle.connect(connStr)
        logging.info('Connected to %s as user %s' %(db_service[0],username))
        self.db.current_schema = schema_name
    def process_tag_boost_version( self, t, timetype, tagBoostVersion, minIov, timeCut, validate=False ):
        """Combine the streamer info of tag `t`'s IOVs into a boost version.

        :param t: tag name.
        :param timetype: the tag's time type ('Run', 'Time', ...).
        :param tagBoostVersion: previously stored version to update, or None.
        :param timeCut: IOV insertion-time boundary; with an existing version
            (incremental update) only newer IOVs are read, in validate mode
            IOVs inserted after the cut are skipped instead.
        :param validate: snapshot-validation mode flag.
            FIX: the original signature had no default, but two call sites
            (validate_boost_version, show_tag_boost_version) passed only five
            arguments, raising TypeError at runtime.
        :return: (tagBoostVersion, minIov), or (None, None) when it cannot
            be determined.
        """
        if self.iovs is None:
            self.iovs = []
            cursor = self.db.cursor()
            stmt = 'SELECT IOV.SINCE SINCE, IOV.INSERTION_TIME INSERTION_TIME, P.STREAMER_INFO STREAMER_INFO FROM TAG, IOV, PAYLOAD P WHERE TAG.NAME = IOV.TAG_NAME AND P.HASH = IOV.PAYLOAD_HASH AND TAG.NAME = :TAG_NAME'
            params = (t,)
            if timeCut and tagBoostVersion is not None and not validate:
                # Incremental update: only fold in IOVs inserted after the cut.
                whereClauseOnSince = ' AND IOV.INSERTION_TIME>:TIME_CUT'
                stmt = stmt + whereClauseOnSince
                params = params + (timeCut,)
            stmt = stmt + ' ORDER BY SINCE'
            logging.debug('Executing: "%s"' %stmt)
            cursor.execute(stmt,params)
            for r in cursor:
                streamer_info = str(r[2].read())
                self.iovs.append((r[0],r[1],streamer_info))
        niovs = 0
        self.versionIovs = []
        lastBoost = None
        update = False
        if tagBoostVersion is not None:
            update = True
        for iov in self.iovs:
            # In validate mode, ignore IOVs inserted after the snapshot time.
            if validate and timeCut is not None and timeCut < iov[1]:
                continue
            niovs += 1
            iovBoostVersion, tagBoostVersion = sm.update_tag_boost_version( tagBoostVersion, minIov, iov[2], iov[0], timetype, self.version_db.boost_run_map )
            if minIov is None or iov[0]<minIov:
                minIov = iov[0]
            logging.debug('iov: %s - inserted on %s - streamer: %s' %(iov[0],iov[1],iov[2]))
            logging.debug('current tag boost version: %s minIov: %s' %(tagBoostVersion,minIov))
            if lastBoost is None or lastBoost!=iovBoostVersion:
                self.versionIovs.append((iov[0],iovBoostVersion))
                lastBoost = iovBoostVersion
        if tagBoostVersion is None:
            if niovs == 0:
                logging.warning( 'No iovs found. boost version cannot be determined.')
                return None, None
            else:
                logging.error('Could not determine the tag boost version.' )
                return None, None
        else:
            if niovs == 0:
                logging.info('Tag boost version has not changed.')
            else:
                msg = 'Found tag boost version %s ( min iov: %s ) combining payloads from %s iovs' %(tagBoostVersion,minIov,niovs)
                if timeCut is not None:
                    if update:
                        msg += ' (iov insertion time>%s)' %str(timeCut)
                    else:
                        msg += ' (iov insertion time<%s)' %str(timeCut)
                logging.info( msg )
        return tagBoostVersion, minIov
    def validate_boost_version( self, t, timetype, tagBoostVersion ):
        """Check tag `t`'s boost version against every GT that references it.

        :return: list of (gt name, gt boost version) found incompatible.
        """
        cursor = self.db.cursor()
        cursor.execute('SELECT GT.NAME, GT.RELEASE, GT.SNAPSHOT_TIME FROM GLOBAL_TAG GT, GLOBAL_TAG_MAP GTM WHERE GT.NAME = GTM.GLOBAL_TAG_NAME AND GTM.TAG_NAME = :TAG_NAME',(t,))
        rows = cursor.fetchall()
        invalid_gts = []
        ngt = 0
        gts = []
        for r in rows:
            gts.append((r[0],r[1],r[2]))
        if len(gts)>0:
            logging.info('validating %s gts.' %len(gts))
        # Cache of snapshot-time -> boost version to avoid recomputing per GT.
        boost_snapshot_map = {}
        for gt in gts:
            ngt += 1
            logging.debug('Validating for GT %s (release %s)' %(gt[0],gt[1]))
            gtCMSSWVersion = sm.check_cmssw_version( gt[1] )
            gtBoostVersion = self.version_db.lookup_boost_in_cmssw( gtCMSSWVersion )
            if sm.cmp_boost_version( gtBoostVersion, tagBoostVersion )<0:
                logging.warning( 'The boost version computed from all the iovs in the tag (%s) is incompatible with the gt [%s] %s (consuming ver: %s, snapshot: %s)' %(tagBoostVersion,ngt,gt[0],gtBoostVersion,str(gt[2])))
                if str(gt[2]) not in boost_snapshot_map.keys():
                    tagSnapshotBoostVersion = None
                    minIov = None
                    # validate=True restricts the computation to the IOVs the
                    # GT snapshot actually consumed (FIX: the original passed
                    # only five arguments here, a TypeError).
                    tagSnapshotBoostVersion, minIov = self.process_tag_boost_version(t, timetype, tagSnapshotBoostVersion, minIov, gt[2], validate=True)
                    if tagSnapshotBoostVersion is not None:
                        boost_snapshot_map[str(gt[2])] = tagSnapshotBoostVersion
                    else:
                        continue
                else:
                    tagSnapshotBoostVersion = boost_snapshot_map[str(gt[2])]
                if sm.cmp_boost_version( gtBoostVersion, tagSnapshotBoostVersion )<0:
                    logging.error('The snapshot from tag used by gt %s (consuming ver: %s) has an incompatible combined boost version %s' %(gt[0],gtBoostVersion,tagSnapshotBoostVersion))
                    invalid_gts.append( ( gt[0], gtBoostVersion ) )
        if len(invalid_gts)==0:
            if ngt>0:
                logging.info('boost version for the tag validated in %s referencing Gts' %(ngt))
            else:
                logging.info('No GT referencing this tag found.')
        else:
            logging.error( 'boost version for the tag is invalid.')
        return invalid_gts
    def update_tag_boost_version_in_db( self, t, tagBoostVersion, minIov, update ):
        """Insert or update tag `t`'s row in TAG_METADATA (no commit here)."""
        cursor = self.db.cursor()
        now = datetime.datetime.utcnow()
        if update:
            cursor.execute('UPDATE TAG_METADATA SET MIN_SERIALIZATION_V=:BOOST_V, MIN_SINCE=:MIN_IOV, MODIFICATION_TIME=:NOW WHERE TAG_NAME = :NAME',( tagBoostVersion,minIov,now,t))
        else:
            cursor.execute('INSERT INTO TAG_METADATA ( TAG_NAME, MIN_SERIALIZATION_V, MIN_SINCE, MODIFICATION_TIME ) VALUES ( :NAME, :BOOST_V, :MIN_IOV, :NOW )',(t, tagBoostVersion,minIov,now))
        logging.info('Minimum boost version for the tag updated.')
    def update_tags( self ):
        """Compute/refresh the stored minimum boost version for the selected tags.

        With --name a single tag is processed; otherwise up to --max (or all,
        with --all) tags that are missing or out of date in TAG_METADATA.
        """
        cursor = self.db.cursor()
        self.version_db = version_db( self.db )
        self.version_db.fetch_cmssw_boost_map()
        self.version_db.fetch_boost_run_map()
        # tags: name -> None (new) or (stored version, min since, modification time).
        tags = {}
        wpars = ()
        if self.args.name is not None:
            stmt0 = 'SELECT NAME FROM TAG WHERE NAME = :TAG_NAME'
            wpars = (self.args.name,)
            cursor.execute(stmt0,wpars);
            rows = cursor.fetchall()
            found = False
            for r in rows:
                found = True
                break
            if not found:
                raise Exception('Tag %s does not exists in the database.' %self.args.name )
            tags[self.args.name] = None
            stmt1 = 'SELECT MIN_SERIALIZATION_V, MIN_SINCE, CAST(MODIFICATION_TIME AS TIMESTAMP(0)) FROM TAG_METADATA WHERE TAG_NAME = :NAME'
            cursor.execute(stmt1,wpars);
            rows = cursor.fetchall()
            for r in rows:
                tags[self.args.name] = (r[0],r[1],r[2])
        else:
            # First pick tags that have no metadata row yet...
            stmt0 = 'SELECT NAME FROM TAG WHERE NAME NOT IN ( SELECT TAG_NAME FROM TAG_METADATA) ORDER BY NAME'
            nmax = 100
            if self.args.max is not None:
                nmax = self.args.max
            if self.args.all:
                nmax = -1
            if nmax >=0:
                stmt0 = 'SELECT NAME FROM (SELECT NAME FROM TAG WHERE NAME NOT IN ( SELECT TAG_NAME FROM TAG_METADATA ) ORDER BY NAME) WHERE ROWNUM<= :MAXR'
                wpars = (nmax,)
            cursor.execute(stmt0,wpars);
            rows = cursor.fetchall()
            for r in rows:
                tags[r[0]] = None
            # ...then tags whose metadata is older than their newest IOV.
            stmt1 = 'SELECT T.NAME NAME, TM.MIN_SERIALIZATION_V MIN_SERIALIZATION_V, TM.MIN_SINCE MIN_SINCE, CAST(TM.MODIFICATION_TIME AS TIMESTAMP(0)) MODIFICATION_TIME FROM TAG T, TAG_METADATA TM WHERE T.NAME=TM.TAG_NAME AND CAST(TM.MODIFICATION_TIME AS TIMESTAMP(0)) < (SELECT MAX(INSERTION_TIME) FROM IOV WHERE IOV.TAG_NAME=TM.TAG_NAME) ORDER BY NAME'
            nmax = nmax-len(tags)
            if nmax >=0:
                stmt1 = 'SELECT NAME, MIN_SERIALIZATION_V, MIN_SINCE, MODIFICATION_TIME FROM (SELECT T.NAME NAME, TM.MIN_SERIALIZATION_V MIN_SERIALIZATION_V, TM.MIN_SINCE MIN_SINCE, CAST(TM.MODIFICATION_TIME AS TIMESTAMP(0)) MODIFICATION_TIME FROM TAG T, TAG_METADATA TM WHERE T.NAME=TM.TAG_NAME AND CAST(TM.MODIFICATION_TIME AS TIMESTAMP(0)) < (SELECT MAX(INSERTION_TIME) FROM IOV WHERE IOV.TAG_NAME=TM.TAG_NAME) ORDER BY NAME) WHERE ROWNUM<= :MAXR'
                wpars = (nmax,)
            cursor.execute(stmt1,wpars);
            rows = cursor.fetchall()
            i = 0
            for r in rows:
                i += 1
                if nmax >=0 and i>nmax:
                    break
                tags[r[0]] = (r[1],r[2],r[3])
        logging.info( 'Processing boost version for %s tags' %len(tags))
        count = 0
        for t in sorted(tags.keys()):
            count += 1
            try:
                update = False
                cursor.execute('SELECT TIME_TYPE FROM TAG WHERE NAME= :TAG_NAME',(t,))
                timetype = cursor.fetchone()[0]
                self.iovs = None
                logging.info('************************************************************************')
                logging.info('Tag [%s] %s - timetype: %s' %(count,t,timetype))
                tagBoostVersion = None
                minIov = None
                timeCut = None
                if tags[t] is not None:
                    update = True
                    tagBoostVersion = tags[t][0]
                    minIov = tags[t][1]
                    timeCut = tags[t][2]
                tagBoostVersion, minIov = self.process_tag_boost_version( t, timetype, tagBoostVersion, minIov, timeCut, self.args.validate )
                if tagBoostVersion is None:
                    continue
                logging.debug('boost versions in the %s iovs: %s' %(len(self.iovs),str(self.versionIovs)))
                if self.args.validate:
                    invalid_gts = self.validate_boost_version( t, timetype, tagBoostVersion )
                    if len(invalid_gts)>0:
                        with open('invalid_tags_in_gts.txt','a') as error_file:
                            for gt in invalid_gts:
                                error_file.write('Tag %s (boost %s) is invalid for GT %s ( boost %s) \n' %(t,tagBoostVersion,gt[0],gt[1]))
                if len(self.iovs):
                    if self.iovs[0][0]<minIov:
                        # FIX: the original assigned the whole (since, time,
                        # streamer) tuple to minIov instead of the since value,
                        # which cannot be bound to the MIN_SINCE column.
                        minIov = self.iovs[0][0]
                self.update_tag_boost_version_in_db( t, tagBoostVersion, minIov, update )
                self.db.commit()
            except Exception as e:
                logging.error(str(e))
    def insert_boost_run( self ):
        """Insert a new boost-version / run-range row (subcommand 'insert')."""
        cursor = self.db.cursor()
        self.version_db = version_db( self.db )
        if self.args.min_ts is None:
            raise Exception("Run %s has not been found in the database - please provide an explicit TimeType value with the min_ts parameter ." %self.args.since )
        self.version_db.insert_boost_run_range( self.args.since, self.args.label, self.args.min_ts )
        self.db.commit()
        logging.info('boost version %s inserted with since %s' %(self.args.label,self.args.since))
    def list_boost_run( self ):
        """Print the full boost/run map as a table (subcommand 'list')."""
        cursor = self.db.cursor()
        self.version_db = version_db( self.db )
        self.version_db.fetch_boost_run_map()
        headers = ['Run','Run start time','Boost Version','Insertion time']
        print_table( headers, self.version_db.boost_run_map )
    def show_tag_boost_version( self ):
        """Display the stored boost version of one tag, optionally recomputing it.

        With --rebuild/--full the version is recomputed from the available
        IOVs; --full additionally prints the per-run version breakdown.
        """
        cursor = self.db.cursor()
        tag = self.args.tag_name
        cursor.execute('SELECT TIME_TYPE FROM TAG WHERE NAME= :TAG_NAME',(tag,))
        rows = cursor.fetchall()
        timeType = None
        t_modificationTime = None
        for r in rows:
            timeType = r[0]
        if timeType is None:
            raise Exception("Tag %s does not exist in the database." %tag)
        cursor.execute('SELECT MAX(INSERTION_TIME) FROM IOV WHERE TAG_NAME= :TAG_NAME',(tag,))
        rows = cursor.fetchall()
        for r in rows:
            t_modificationTime = r[0]
        if t_modificationTime is None:
            raise Exception("Tag %s does not have any iov stored." %tag)
        logging.info('Tag %s - timetype: %s' %(tag,timeType))
        cursor.execute('SELECT MIN_SERIALIZATION_V, MIN_SINCE, MODIFICATION_TIME FROM TAG_METADATA WHERE TAG_NAME= :TAG_NAME',(tag,))
        rows = cursor.fetchall()
        tagBoostVersion = None
        minIov = None
        v_modificationTime = None
        for r in rows:
            tagBoostVersion = r[0]
            minIov = r[1]
            v_modificationTime = r[2]
        if v_modificationTime is not None:
            if t_modificationTime > v_modificationTime:
                logging.warning('The minimum boost version stored is out of date.')
            else:
                logging.info('The minimum boost version stored is up to date.')
        mt = '-'
        if v_modificationTime is not None:
            mt = str(v_modificationTime)
        r_tagBoostVersion = None
        if self.args.rebuild or self.args.full:
            self.version_db = version_db( self.db )
            self.version_db.fetch_boost_run_map()
            timeCut = None
            logging.info('Calculating minimum boost version for the available iovs...')
            # Relies on the validate=False default added to
            # process_tag_boost_version (the original call would TypeError).
            r_tagBoostVersion, r_minIov = self.process_tag_boost_version( tag, timeType, tagBoostVersion, minIov, timeCut )
        print('# Currently stored: %s (min iov:%s)' %(tagBoostVersion,minIov))
        print('# Last update: %s' %mt)
        print('# Last update on the iovs: %s' %str(t_modificationTime))
        if self.args.rebuild or self.args.full:
            print('# Based on the %s available IOVs: %s (min iov:%s)' %(len(self.iovs),r_tagBoostVersion,r_minIov))
            if self.args.full:
                headers = ['Run','Boost Version']
                print_table( headers, self.versionIovs )
import optparse
import argparse
def main():
    """Build the conddb serialization-metadata CLI and dispatch a subcommand.

    With -v/--verbose the command runs without the catch-all handler so full
    tracebacks remain visible; otherwise errors are logged and exit(1).
    """
    tool = conddb_tool()
    parser = argparse.ArgumentParser(description='CMS conddb command-line tool for serialization metadata. For general help (manual page), use the help subcommand.')
    parser.add_argument('--db', type=str, help='The target database: pro ( for prod ) or dev ( for prep ). default=pro')
    parser.add_argument("--auth","-a", type=str, help="The path of the authentication file")
    # BUG FIX: action='count' defaults to None, so "args.verbose >= 1" used to
    # raise TypeError whenever -v was not given; default to 0 instead.
    parser.add_argument('--verbose', '-v', action='count', default=0, help='The verbosity level')
    parser_subparsers = parser.add_subparsers(title='Available subcommands')
    parser_update_tags = parser_subparsers.add_parser('update_tags', description='Update the existing tag headers with the boost version')
    parser_update_tags.add_argument('--name', '-n', type=str, help='Name of the specific tag to process (default=None - in this case all of the tags will be processed.)')
    parser_update_tags.add_argument('--max', '-m', type=int, help='the maximum number of tags processed',default=100)
    parser_update_tags.add_argument('--all',action='store_true', help='process all of the tags with boost_version = None')
    parser_update_tags.add_argument('--validate',action='store_true', help='validate the tag/boost version under processing')
    parser_update_tags.set_defaults(func=tool.update_tags,accessType='w')
    parser_insert_boost_version = parser_subparsers.add_parser('insert', description='Insert a new boost version range in the run map')
    parser_insert_boost_version.add_argument('--label', '-l',type=str, help='The boost version label',required=True)
    parser_insert_boost_version.add_argument('--since', '-s',type=int, help='The since validity (run number)',required=True)
    parser_insert_boost_version.add_argument('--min_ts', '-t',type=str, help='The since validity (Time timetype)', required=False)
    parser_insert_boost_version.set_defaults(func=tool.insert_boost_run,accessType='w')
    parser_list_boost_versions = parser_subparsers.add_parser('list', description='list the boost versions in the run map')
    parser_list_boost_versions.set_defaults(func=tool.list_boost_run,accessType='r')
    parser_show_version = parser_subparsers.add_parser('show_tag', description='Display the minimum boost version for the specified tag (the value stored, by default)')
    parser_show_version.add_argument('tag_name',help='The name of the tag')
    parser_show_version.add_argument('--rebuild','-r',action='store_true',default=False,help='Re-calculate the minimum boost version')
    parser_show_version.add_argument('--full',action='store_true',default=False,help='Recalculate the minimum boost version, listing the versions in the iov sequence')
    parser_show_version.set_defaults(func=tool.show_tag_boost_version,accessType='r')
    args = parser.parse_args()
    tool.args = args
    # Python 3 argparse does not require a subcommand: fail with usage text
    # instead of an AttributeError on args.func.
    if not hasattr(args, 'func'):
        parser.error('no subcommand given')
    if args.verbose >= 1:
        # Verbose mode: let exceptions propagate with their traceback.
        tool.logger.setLevel(logging.DEBUG)
        tool.connect()
        return args.func()
    else:
        try:
            tool.connect()
            sys.exit( args.func())
        except Exception as e:
            logging.error(e)
            sys.exit(1)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
9771350 | import numpy as np
import gym
class POMDPWrapper(gym.ObservationWrapper):
    """Turn a fully observable gym environment (created via gym.make) into a
    POMDP by corrupting or truncating its observations; see ``pomdp_type``."""

    def __init__(self, env_name, pomdp_type='remove_velocity',
                 flicker_prob=0.2, random_noise_sigma=0.1, random_sensor_missing_prob=0.1):
        """
        :param env_name: gym environment id to wrap.
        :param pomdp_type:
            1. remove_velocity: remove velocity related observation
            2. flickering: obscure the entire observation with a certain probability at each time step with the
               probability flicker_prob.
            3. random_noise: each sensor in an observation is disturbed by a random noise Normal ~ (0, sigma).
            4. random_sensor_missing: each sensor in an observation will miss with a relatively low probability sensor_miss_prob
            5. remove_velocity_and_flickering:
            6. remove_velocity_and_random_noise:
            7. remove_velocity_and_random_sensor_missing:
            8. flickering_and_random_noise:
            9. random_noise_and_random_sensor_missing
            10. random_sensor_missing_and_random_noise:
        """
        super().__init__(gym.make(env_name))
        self.pomdp_type = pomdp_type
        self.flicker_prob = flicker_prob
        self.random_noise_sigma = random_noise_sigma
        self.random_sensor_missing_prob = random_sensor_missing_prob
        if self.pomdp_type == 'remove_velocity':
            # Remove velocity info: this also changes the observation space.
            self.remain_obs_idx, self.observation_space = self._remove_velocity(env_name)
        elif self.pomdp_type in ('flickering',
                                 'random_noise',
                                 'random_sensor_missing',
                                 'flickering_and_random_noise',
                                 'random_noise_and_random_sensor_missing',
                                 'random_sensor_missing_and_random_noise'):
            # These corruptions keep the original observation space.
            pass
        elif self.pomdp_type in ('remove_velocity_and_flickering',
                                 'remove_velocity_and_random_noise',
                                 'remove_velocity_and_random_sensor_missing'):
            # Every combined variant that drops velocity shares the same
            # observation-space change as plain remove_velocity.
            self.remain_obs_idx, self.observation_space = self._remove_velocity(env_name)
        else:
            # BUG FIX: the old message claimed the type "was not specified"
            # even though an (unknown) value was given.
            raise ValueError("Unknown pomdp_type: {}".format(pomdp_type))

    def observation(self, obs):
        """Corrupt a single observation according to ``self.pomdp_type``."""
        # Single source of POMDP
        if self.pomdp_type == 'remove_velocity':
            return obs.flatten()[self.remain_obs_idx]
        elif self.pomdp_type == 'flickering':
            # Note: flickering is equivalent to:
            # flickering_and_random_sensor_missing, random_noise_and_flickering, random_sensor_missing_and_flickering
            if np.random.rand() <= self.flicker_prob:
                # NOTE(review): this branch is not flattened, unlike the else
                # branch; identical for the 1-D observations used here.
                return np.zeros(obs.shape)
            else:
                return obs.flatten()
        elif self.pomdp_type == 'random_noise':
            return (obs + np.random.normal(0, self.random_noise_sigma, obs.shape)).flatten()
        elif self.pomdp_type == 'random_sensor_missing':
            obs[np.random.rand(len(obs)) <= self.random_sensor_missing_prob] = 0
            return obs.flatten()
        # Multiple source of POMDP
        elif self.pomdp_type == 'remove_velocity_and_flickering':
            # Note: remove_velocity_and_flickering is equivalent to flickering_and_remove_velocity
            # Remove velocity
            new_obs = obs.flatten()[self.remain_obs_idx]
            # Flickering
            if np.random.rand() <= self.flicker_prob:
                return np.zeros(new_obs.shape)
            else:
                return new_obs
        elif self.pomdp_type == 'remove_velocity_and_random_noise':
            # Note: remove_velocity_and_random_noise is equivalent to random_noise_and_remove_velocity
            # Remove velocity
            new_obs = obs.flatten()[self.remain_obs_idx]
            # Add random noise
            return (new_obs + np.random.normal(0, self.random_noise_sigma, new_obs.shape)).flatten()
        elif self.pomdp_type == 'remove_velocity_and_random_sensor_missing':
            # Note: remove_velocity_and_random_sensor_missing is equivalent to random_sensor_missing_and_remove_velocity
            # Remove velocity
            new_obs = obs.flatten()[self.remain_obs_idx]
            # Random sensor missing
            new_obs[np.random.rand(len(new_obs)) <= self.random_sensor_missing_prob] = 0
            return new_obs
        elif self.pomdp_type == 'flickering_and_random_noise':
            # Flickering
            if np.random.rand() <= self.flicker_prob:
                new_obs = np.zeros(obs.shape)
            else:
                new_obs = obs
            # Add random noise
            return (new_obs + np.random.normal(0, self.random_noise_sigma, new_obs.shape)).flatten()
        elif self.pomdp_type == 'random_noise_and_random_sensor_missing':
            # Random noise
            new_obs = (obs + np.random.normal(0, self.random_noise_sigma, obs.shape)).flatten()
            # Random sensor missing
            new_obs[np.random.rand(len(new_obs)) <= self.random_sensor_missing_prob] = 0
            return new_obs
        elif self.pomdp_type == 'random_sensor_missing_and_random_noise':
            # Random sensor missing
            obs[np.random.rand(len(obs)) <= self.random_sensor_missing_prob] = 0
            # Random noise
            return (obs + np.random.normal(0, self.random_noise_sigma, obs.shape)).flatten()
        else:
            # BUG FIX: the old message listed only the four single-corruption
            # types even though the combined types are handled above.
            raise ValueError("Unhandled pomdp_type: {}".format(self.pomdp_type))

    def _remove_velocity(self, env_name):
        """Return (indices of the non-velocity sensors, reduced observation space)
        for the supported environments."""
        # OpenAIGym
        # 1. Classic Control
        if env_name == "Pendulum-v0":
            remain_obs_idx = np.arange(0, 2)
        elif env_name == "Acrobot-v1":
            remain_obs_idx = list(np.arange(0, 4))
        elif env_name == "MountainCarContinuous-v0":
            remain_obs_idx = list([0])
        # 1. MuJoCo
        elif env_name == "HalfCheetah-v3" or env_name == "HalfCheetah-v2":
            remain_obs_idx = np.arange(0, 8)
        elif env_name == "Ant-v3" or env_name == "Ant-v2":
            remain_obs_idx = list(np.arange(0, 13)) + list(np.arange(27, 111))
        elif env_name == 'Walker2d-v3' or env_name == "Walker2d-v2":
            remain_obs_idx = np.arange(0, 8)
        elif env_name == 'Hopper-v3' or env_name == "Hopper-v2":
            remain_obs_idx = np.arange(0, 5)
        elif env_name == "InvertedPendulum-v2":
            remain_obs_idx = np.arange(0, 2)
        elif env_name == "InvertedDoublePendulum-v2":
            remain_obs_idx = list(np.arange(0, 5)) + list(np.arange(8, 11))
        elif env_name == "Swimmer-v3" or env_name == "Swimmer-v2":
            remain_obs_idx = np.arange(0, 3)
        elif env_name == "Thrower-v2":
            remain_obs_idx = list(np.arange(0, 7)) + list(np.arange(14, 23))
        elif env_name == "Striker-v2":
            remain_obs_idx = list(np.arange(0, 7)) + list(np.arange(14, 23))
        elif env_name == "Pusher-v2":
            remain_obs_idx = list(np.arange(0, 7)) + list(np.arange(14, 23))
        elif env_name == "Reacher-v2":
            remain_obs_idx = list(np.arange(0, 6)) + list(np.arange(8, 11))
        elif env_name == 'Humanoid-v3' or env_name == "Humanoid-v2":
            remain_obs_idx = list(np.arange(0, 22)) + list(np.arange(45, 185)) + list(np.arange(269, 376))
        elif env_name == 'HumanoidStandup-v2':
            remain_obs_idx = list(np.arange(0, 22)) + list(np.arange(45, 185)) + list(np.arange(269, 376))
        # PyBulletEnv:
        # The following is not implemented:
        #   HumanoidDeepMimicBulletEnv - v1
        #   CartPoleBulletEnv - v1
        #   MinitaurBulletEnv - v0
        #   MinitaurBulletDuckEnv - v0
        #   RacecarBulletEnv - v0
        #   RacecarZedBulletEnv - v0
        #   KukaBulletEnv - v0
        #   KukaCamBulletEnv - v0
        #   PusherBulletEnv - v0
        #   ThrowerBulletEnv - v0
        #   StrikerBulletEnv - v0
        #   HumanoidBulletEnv - v0
        #   HumanoidFlagrunBulletEnv - v0
        #   HumanoidFlagrunHarderBulletEnv - v0
        elif env_name == 'HalfCheetahBulletEnv-v0':
            remain_obs_idx = list(set(np.arange(0, 26)) - set(np.arange(3, 6)))
        elif env_name == 'AntBulletEnv-v0':
            remain_obs_idx = list(set(np.arange(0, 28)) - set(np.arange(3, 6)))
        elif env_name == 'HopperBulletEnv-v0':
            remain_obs_idx = list(set(np.arange(0, 15)) - set(np.arange(3, 6)))
        elif env_name == 'Walker2DBulletEnv-v0':
            remain_obs_idx = list(set(np.arange(0, 22)) - set(np.arange(3, 6)))
        elif env_name == 'InvertedPendulumBulletEnv-v0':
            remain_obs_idx = list(set(np.arange(0, 5)) - set([1, 4]))
        elif env_name == 'InvertedDoublePendulumBulletEnv-v0':
            remain_obs_idx = list(set(np.arange(0, 9)) - set([1, 5, 8]))
        elif env_name == 'InvertedPendulumSwingupBulletEnv-v0':
            # BUG FIX: this branch used to `pass`, leaving remain_obs_idx
            # unbound and crashing below with UnboundLocalError.
            raise NotImplementedError(
                'remove_velocity is not implemented for InvertedPendulumSwingupBulletEnv-v0')
        elif env_name == 'ReacherBulletEnv-v0':
            remain_obs_idx = list(set(np.arange(0, 9)) - set([6, 8]))
        # PyBulletGym
        # 1. MuJoCo
        elif env_name == 'HalfCheetahMuJoCoEnv-v0':
            remain_obs_idx = np.arange(0, 8)
        elif env_name == 'AntMuJoCoEnv-v0':
            remain_obs_idx = list(np.arange(0, 13)) + list(np.arange(27, 111))
        elif env_name == 'Walker2DMuJoCoEnv-v0':
            remain_obs_idx = np.arange(0, 8)
        elif env_name == 'HopperMuJoCoEnv-v0':
            remain_obs_idx = np.arange(0, 7)
        elif env_name == 'InvertedPendulumMuJoCoEnv-v0':
            remain_obs_idx = np.arange(0, 3)
        elif env_name == 'InvertedDoublePendulumMuJoCoEnv-v0':
            remain_obs_idx = list(np.arange(0, 5)) + list(np.arange(8, 11))
        # 2. Roboschool
        elif env_name == 'HalfCheetahPyBulletEnv-v0':
            remain_obs_idx = list(set(np.arange(0, 26)) - set(np.arange(3, 6)))
        elif env_name == 'AntPyBulletEnv-v0':
            remain_obs_idx = list(set(np.arange(0, 28)) - set(np.arange(3, 6)))
        elif env_name == 'Walker2DPyBulletEnv-v0':
            remain_obs_idx = list(set(np.arange(0, 22)) - set(np.arange(3, 6)))
        elif env_name == 'HopperPyBulletEnv-v0':
            remain_obs_idx = list(set(np.arange(0, 15)) - set(np.arange(3, 6)))
        elif env_name == 'InvertedPendulumPyBulletEnv-v0':
            remain_obs_idx = list(set(np.arange(0, 5)) - set([1, 4]))
        elif env_name == 'InvertedDoublePendulumPyBulletEnv-v0':
            remain_obs_idx = list(set(np.arange(0, 9)) - set([1, 5, 8]))
        elif env_name == 'ReacherPyBulletEnv-v0':
            remain_obs_idx = list(set(np.arange(0, 9)) - set([6, 8]))
        else:
            raise ValueError('POMDP for {} is not defined!'.format(env_name))
        # Redefine observation_space to match the reduced sensor set.
        obs_low = np.array([-np.inf for i in range(len(remain_obs_idx))], dtype="float32")
        obs_high = np.array([np.inf for i in range(len(remain_obs_idx))], dtype="float32")
        observation_space = gym.spaces.Box(obs_low, obs_high)
        return remain_obs_idx, observation_space
if __name__ == '__main__':
    # pybulletgym is imported for its env-registration side effect.
    import pybulletgym
    import gym
    demo_env = POMDPWrapper("AntPyBulletEnv-v0", 'remove_velocity_and_flickering')
    first_obs = demo_env.reset()
    print(demo_env.action_space)
    print(demo_env.observation_space)
    print(first_obs)
| StarcoderdataPython |
326998 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines some basic common application functionality, like logging.
"""
import os
import types
import logbook
import yaml
def get_logger(logger_name="postgresql-metrics"):
    """Return a logbook.Logger bound to the given channel name."""
    return logbook.Logger(logger_name)
def figure_out_log_level(given_level):
    """Accept a log level as a name (e.g. 'info') or as a logbook level value.

    Strings are normalized and translated via logbook.lookup_level();
    any non-string value is returned unchanged.
    """
    # BUG FIX: types.StringTypes exists only on Python 2; fall back to (str,)
    # so this helper also works on Python 3.
    string_types = getattr(types, 'StringTypes', (str,))
    if isinstance(given_level, string_types):
        return logbook.lookup_level(given_level.strip().upper())
    else:
        return given_level
def init_logging_stderr(log_level='notset', bubble=False):
    """Install a logbook stderr handler at the given level."""
    level = figure_out_log_level(log_level)
    stderr_handler = logbook.StderrHandler(level=level, bubble=bubble)
    stderr_handler.push_application()
    get_logger().debug("stderr logging initialized")
def init_logging_file(filename, log_level='notset', rotate_log=True, rotate_max_size=10485760,
                      bubble=True):
    """Install a logbook file handler writing to *filename*.

    :param filename: target log file; its directory is created if missing
    :param rotate_log: use a size-rotated file handler when True
    :param rotate_max_size: rotation threshold in bytes
    :param bubble: let records propagate to other installed handlers as well
    """
    log_dir = os.path.dirname(filename)
    # BUG FIX: a bare filename yields log_dir == '' and os.makedirs('') raises;
    # only create the directory when one is actually given.
    if log_dir and not os.path.exists(log_dir):
        os.makedirs(log_dir)
    if rotate_log is True:
        handler = logbook.RotatingFileHandler(filename, level=figure_out_log_level(log_level),
                                              max_size=int(rotate_max_size), bubble=bubble)
    else:
        handler = logbook.FileHandler(filename, level=figure_out_log_level(log_level),
                                      bubble=bubble)
    handler.push_application()
    get_logger().debug("file based logging initialized in directory: " + log_dir)
def init_logging_syslog(log_level='notset', facility='local0', bubble=True):
    """Install a logbook syslog handler for the 'postgresql-metrics' app."""
    syslog_handler = logbook.SyslogHandler('postgresql-metrics', facility=facility,
                                           level=figure_out_log_level(log_level), bubble=bubble)
    syslog_handler.push_application()
    get_logger().debug("syslog logging initialized")
def merge_configs(to_be_merged, default):
    """Merges two configuration dictionaries by overwriting values with
    same keys, with the priority on values given on the 'left' side, so
    the to_be_merged dict.  to_be_merged is modified in place and returned.
    Notice that with lists in the configuration, it skips from the default
    (right side) the tuples in that which already exist in the left side
    to_be_merged list. This is used to be able to override time intervals for
    default values in the configuration.
    Example:
    In [1]: x = [["get_stats_disk_usage_for_database", 180],
                 ["get_stats_tx_rate_for_database", 500]]
    In [2]: y = [["get_stats_seconds_since_last_vacuum_per_table", 60],
                 ["get_stats_tx_rate_for_database", 60]]
    In [3]: merge_configs(x, y)
    Out[3]:
    [['get_stats_disk_usage_for_database', 180],
     ['get_stats_tx_rate_for_database', 500],
     ['get_stats_seconds_since_last_vacuum_per_table', 60]]
    """
    if isinstance(to_be_merged, dict) and isinstance(default, dict):
        # BUG FIX: dict.iteritems() is Python 2 only; items() works on both.
        for k, v in default.items():
            if k not in to_be_merged:
                to_be_merged[k] = v
            else:
                to_be_merged[k] = merge_configs(to_be_merged[k], v)
    elif isinstance(to_be_merged, list) and isinstance(default, list):
        # Collect the first elements ("keys") appearing on both sides ...
        # NOTE(review): x[0] would raise TypeError for set entries; in
        # practice the config entries are lists, per the docstring example.
        same_keys = set()
        for x in to_be_merged:
            for y in default:
                if isinstance(x, (list, set, tuple)) and isinstance(y, (list, set, tuple)) and len(
                        x) > 0 and len(y) > 0 and x[0] == y[0]:
                    same_keys.add(x[0])
        # ... and append only the default entries whose key was not overridden.
        for y in default:
            if not isinstance(y, (list, set, tuple)) or y[0] not in same_keys:
                to_be_merged.append(y)
    return to_be_merged
def find_and_parse_config(config_path):
    """Finds the service configuration file and parses it.
    Checks also a directory called default, to check for default configuration values,
    that will be overwritten by the actual configuration found on given path.
    Returns an empty dict when no configuration file exists.
    """
    config_filename = os.path.basename(config_path)
    config_root = os.path.dirname(config_path)
    default_root = os.path.join(config_root, 'default')
    config_dict = {}
    # Read defaults first so the non-default file is merged on top of them.
    for config_dir in (default_root, config_root):
        current_path = os.path.join(config_dir, config_filename)
        if os.path.isfile(current_path):
            # BUG FIX: the file() builtin is Python 2 only; open() works on
            # both and is the documented way to get a context manager.
            with open(current_path, 'r') as f:
                # NOTE(review): yaml.load() without an explicit Loader can
                # construct arbitrary objects; consider yaml.safe_load if the
                # config file is not fully trusted.
                read_config_dict = yaml.load(f)
                config_dict = merge_configs(read_config_dict, config_dict)
    return config_dict
| StarcoderdataPython |
6697399 | <reponame>drhagen/nox-poetry
"""Unit tests for the poetry module."""
from pathlib import Path
from nox_poetry import poetry
def test_config_non_ascii(tmp_path: Path) -> None:
    """It decodes non-ASCII characters in pyproject.toml."""
    toml_source = """\
[tool.poetry]
name = "África"
"""
    pyproject = tmp_path / "pyproject.toml"
    pyproject.write_text(toml_source, encoding="utf-8")
    config = poetry.Config(pyproject.parent)
    assert config.name == "África"
| StarcoderdataPython |
11372825 | <filename>pymc/sandbox/test_twalk.py
from pymc.sandbox.TWalk import *
from pymc import *
from numpy import random, inf
import pdb
"""
Test model for T-walk algorithm:
Suppose x_{i,j} ~ Be( theta_j ), i=0,1,2,...,n_j-1, ind. j=0,1,2
But it is known that 0 < theta_0 < theta_3 < theta_2 < 1
"""
theta_true = array([ 0.4, 0.5, 0.7 ]) ### True thetas
n = array([ 20, 15, 40]) ### sample sizes
#### Simulated data, but we only need the sum of 1's
# r[j] counts the successes out of n[j] Bernoulli(theta_true[j]) draws.
r = zeros(3)
for j in range(3):
    r[j] = sum(random.random(size=n[j]) < theta_true[j])
@stochastic
def theta(value=(0.45, 0.5, 0.55)):
    """Ordered prior: theta ~ Uniform(0, 1)^3 restricted to theta_0 < theta_1 < theta_2."""
    a,b,c = value
    # Log-probability -inf (zero prior mass) outside the ordering constraint.
    if not a<b<c:
        return -inf
    return uniform_like(value, 0, 1)
# Binomial likelihood: r[j] successes out of n[j] trials with probability theta[j]
x = Binomial('x', n=n, p=theta, value=r, observed=True)
# Sample theta with the T-walk step method (replacing the default sampler)
M = MCMC([theta, x])
M.use_step_method(TWalk, theta, inits=(0.3,0.4,0.5), verbose=1)
# 50000 iterations, discarding the first 40000 as burn-in
M.sample(50000, 40000, verbose=2)
Matplot.plot(M)
165270 | <filename>bin/expr_parse.py
#!/usr/bin/env python2
"""
expr_parse.py -- Demo for translation.
types/run.sh expr-parse
"""
from __future__ import print_function
import sys
from _devbuild.gen import grammar_nt
from _devbuild.gen.syntax_asdl import source
from asdl import format as fmt
from core import alloc
from core import error
from core import meta
from core import pyutil
from core import ui
from frontend import reader
from frontend import lexer
from oil_lang import expr_parse
from oil_lang import expr_to_ast
from typing import List
def main(argv):
  # type: (List[str]) -> int
  """Parse an Oil expression from stdin and pretty-print its AST.

  Returns 0 on success, 2 on a parse error.
  """
  pool = alloc.Arena()
  pool.PushSource(source.Stdin(''))

  grammar = meta.LoadOilGrammar(pyutil.GetResourceLoader())

  # No parse context is needed for this standalone demo.
  expression_parser = expr_parse.ExprParser(None, grammar)

  line_reader = reader.FileLineReader(sys.stdin, pool)
  lex = lexer.Lexer(lexer.LineLexer('', pool), line_reader)

  try:
    pnode, _ = expression_parser.Parse(lex, grammar_nt.command_expr)
  except error.Parse as e:
    ui.PrettyPrintError(e, pool)
    return 2

  ast_node = expr_to_ast.Transformer(grammar).Expr(pnode)
  assert ast_node is not None

  out = fmt.DetectConsoleOutput(sys.stdout)
  fmt.PrintTree(ast_node.AbbreviatedTree(), out)
  out.write('\n')
  return 0
if __name__ == '__main__':
  try:
    # BUG FIX: propagate main()'s status code (it returns 2 on parse errors);
    # previously the return value was dropped and the script always exited 0.
    sys.exit(main(sys.argv))
  except RuntimeError as e:
    print('FATAL: %s' % e, file=sys.stderr)
    sys.exit(1)
| StarcoderdataPython |
3281882 | <reponame>pingjuiliao/cb-multios
#!/usr/bin/env python
#
# Copyright (C) 2014 Narf Industries <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from generator.actions import Actions, Variable
import string
import random
import itertools
import math
from struct import *
class TemplateGenerator(Actions):
    """Poll generator for a fixed-width record/report protocol.

    A report layout is a list ``self.fields`` of ``[offset, width, header]``
    entries; every record is a single string of exactly ``self.record_length``
    characters that is sliced per field.  The large commented-out blocks
    below are kept on purpose: they are the oversized layouts/records that
    trigger the challenge binary's known vulnerabilities (Vuln 1-4).
    """
    def start(self):
        # The protocol requires no handshake.
        pass
    def newReport(self):
        """Define a random report layout and announce it: ':'-separated
        field headers, with the final header terminated by ';'."""
        #Release
        num_fields = random.randint(3,3)
        self.fields = []
        self.records = []
        self.record_length = 0
        for _ in range(num_fields):
            field_size = random.randint(10,20)
            random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(field_size))
            # fields is built by inserting at the front, so fields[-1] is the
            # first field of a record and fields[0] the last one.
            self.fields.insert(0,[self.record_length, field_size, random_field])
            self.record_length+= len(random_field)
        for field in reversed(self.fields[1:]):
            self.write(field[2] + ":")
        self.write(self.fields[0][2] + ";")
        #Vuln 1
        # num_fields = random.randint(2, 3)
        # self.fields = []
        # self.records = []
        # self.record_length = 0
        # for num in range(num_fields):
        #     if num==num_fields-1:
        #         field_size = 32768 - self.record_length
        #     else:
        #         field_size = random.randint(7,10)
        #     random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(field_size))
        #     self.fields.insert(0,[self.record_length, field_size, random_field])
        #     self.record_length+= len(random_field)
        # for field in reversed(self.fields[1:]):
        #     self.write(field[2] + ":")
        # self.write(self.fields[0][2] + ";")
        # Vuln 2
        # num_fields = random.randint(2, 3)
        # self.fields = []
        # self.records = []
        # self.record_length = 0
        # for num in range(num_fields):
        #     if num==0:
        #         field_size = 32768 + 32768 - 1024
        #     elif num==1:
        #         field_size = 2048
        #     else:
        #         field_size = random.randint(7,10)
        #     random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(field_size))
        #     self.fields.insert(0,[self.record_length, field_size, random_field])
        #     self.record_length+= len(random_field)
        # for field in reversed(self.fields[1:]):
        #     self.write(field[2] + ":")
        # self.write(self.fields[0][2] + ";")
        # Vuln 3
        # num_fields = random.randint(2, 3)
        # self.fields = []
        # self.records = []
        # self.record_length = 0
        # for num in range(num_fields):
        #     if num==0:
        #         field_size = 1024
        #     elif num==1:
        #         field_size = 32768 + 32768 - 1024
        #     else:
        #         field_size = random.randint(7,10)
        #     random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(field_size))
        #     self.fields.insert(0,[self.record_length, field_size, random_field])
        #     self.record_length+= len(random_field)
        # for field in reversed(self.fields[1:]):
        #     self.write(field[2] + ":")
        # self.write(self.fields[0][2] + ";")
    # Vuln 4
    # def newRecord(self):
    #     random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(self.first_field_size))
    #     self.write(random_field + ":")
    #     self.write("ERROR")
    #     self.write(";")
    # Vuln 3
    # def newRecord(self):
    #     random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(self.first_field_size))
    #     self.write(random_field + ":")
    #     num = self.second_field_size
    #     while(num >= 0):
    #         random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(512))
    #         num-=512
    #         self.write(random_field)
    #     self.write(";")
    # Vuln 2
    # def newRecord(self):
    #     num = self.first_field_size
    #     while(num >= 0):
    #         random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(1024))
    #         num-=512
    #         self.write(random_field)
    #     self.write(":")
    #     random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(self.second_field_size))
    #     self.write(random_field + ";")
    # Vuln 1
    # def newRecord(self):
    #     random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(self.first_field_size))
    #     self.write(random_field + ":")
    #     num = self.second_field_size
    #     while(num >= 0):
    #         random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(1024))
    #         num-=4096
    #         self.write(random_field)
    #     self.write(";")
    # def errorRecord(self):
    #     command = "ERROR"
    #     random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(self.record_length))
    #     while(random_field[0:3] == "END" or random_field[0:6] == "REPORT" or random_field[0:4] == "SORT"):
    #         random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(self.record_length))
    #     start = self.fields[0][0]
    #     random_field = random_field[0:start] + "ERROR" + random_field[start+5:len(random_field)]
    #     self.write(random_field)
    def newRecord(self):
        """Send one random record (re-rolled until it avoids the reserved
        END/REPORT/SORT prefixes) and remember it for later verification."""
        random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(self.record_length))
        while(random_field[0:3] == "END" or random_field[0:6] == "REPORT" or random_field[0:4] == "SORT"):
            random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(self.record_length))
        self.write(random_field)
        self.records.insert(0,random_field)
    def sendReport(self):
        """Issue REPORT (padded to one record length) and expect every
        stored record echoed back."""
        command = "REPORT"
        random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(self.record_length-len(command)))
        self.write(command + random_field)
        for record in self.records:
            self.read(length=self.record_length, expect=record)
    def sortReport(self):
        """Issue SORT <field#> and mirror the expected ordering locally."""
        command = "SORT"
        field_num = random.randint(0,len(self.fields)-1)
        # 2-byte unsigned short; struct.pack('H', ...) uses native byte order.
        field_num_str = pack('H', field_num)
        random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(self.record_length-len(command)-len(field_num_str)))
        self.write(command + field_num_str + random_field)
        # Keep our copy sorted by the selected field's slice of each record.
        start = self.fields[field_num][0]
        end = start + self.fields[field_num][1]
        self.records.sort(key=lambda x: x[start:end])
    def filterReport(self):
        """Issue FREPORT with per-field filter values and expect only
        matching records back."""
        command = "FREPORT"
        command = command + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(self.fields[-1][1]-len(command)))
        self.write(command)
        bytes_written = len(command)
        num_filters = random.randint(1, len(self.fields)-1)
        filteredReport = []
        filter_string = ''
        random_field = ''
        for field in reversed(self.fields[0:num_filters]):
            filter_record = random.choice(self.records)
            start = field[0]
            end = start + field[1]
            filter_string = filter_string + filter_record[start:end]
            # NOTE(review): adds the *cumulative* length each pass, so this
            # over-counts for num_filters > 1; in that case the padding branch
            # below is skipped (num_filters == len(fields)-1), so the value
            # is never used.
            bytes_written += len(filter_string)
        if num_filters < len(self.fields)-1:
            random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(self.record_length-bytes_written))
            self.write(random_field)
        self.write(filter_string)
        # Reconstruct the full filter line so records can be compared
        # field-by-field at matching offsets.
        temp_filter_string = "a"*len(command) + random_field + filter_string
        for record in self.records:
            for field in self.fields:
                start = field[0]
                end = start + field[1]
                if((record[start:end] == temp_filter_string[start:end]) and (record not in set(filteredReport))):
                    filteredReport.insert(0, record)
        for record in filteredReport:
            self.read(length=self.record_length, expect=record)
    def quit(self):
        """Issue END (padded to one record length); returning -1 ends the poll."""
        command = "END"
        random_field = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(self.record_length-len(command)))
        self.write(command + random_field)
        return -1
| StarcoderdataPython |
1619166 | """ Group all classes from macro/build"""
from . import (
cavern_construction,
evochamber_construction,
expansion,
extractor_construction,
transformation_to_hive,
hydraden_construction,
transformation_to_lair,
pit_construction,
pool_construction,
spine_construction,
spire_construction,
spore_construction,
)
def get_build_commands(cmd):
    """ Getter for all commands from macro/build"""
    command_classes = (
        pool_construction.PoolConstruction,
        expansion.Expansion,
        extractor_construction.ExtractorConstruction,
        evochamber_construction.EvochamberConstruction,
        cavern_construction.CavernConstruction,
        pit_construction.PitConstruction,
        transformation_to_hive.TransformationToHive,
        transformation_to_lair.TransformationToLair,
        spine_construction.SpineConstruction,
        spore_construction.SporeConstruction,
        spire_construction.SpireConstruction,
        hydraden_construction.HydradenConstruction,
    )
    # Instantiate each command with the shared command context, keeping order.
    return tuple(command(cmd) for command in command_classes)
| StarcoderdataPython |
219122 | <filename>boxvariable.py
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Load the iris measurements and draw one box-and-whisker plot per column.
iris = pd.read_csv("IrisDataSet.csv")
iris.plot(kind='box', subplots=True, layout=(2, 2), sharex=False, sharey=False)
plt.show()
# Adapted from: https://machinelearningmastery.com/machine-learning-in-python-step-by-step/
350485 | <filename>src/villains/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-27 18:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header). Creates the Villain model
    # with a unique name, optional free-text/choice fields, and an M2M link
    # to series.Series. Do not edit once applied: changing an applied
    # migration desynchronizes the migration history.
    initial = True
    dependencies = [
        ('series', '0002_auto_20170223_1225'),
    ]
    operations = [
        migrations.CreateModel(
            name='Villain',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64, unique=True)),
                ('description', models.TextField(blank=True, default=None, null=True)),
                ('gender', models.CharField(blank=True, choices=[(b'female', b'Female'), (b'male', b'Male')], default=None, max_length=8, null=True)),
                ('type', models.CharField(blank=True, choices=[(b'boss', b'Boss'), (b'minion', b'Minion')], default=None, max_length=32, null=True)),
                ('homeworld', models.CharField(blank=True, default=None, max_length=32, null=True)),
                ('series', models.ManyToManyField(blank=True, related_name='villains', to='series.Series')),
            ],
            options={
                'ordering': ('name',),
            },
        ),
    ]
| StarcoderdataPython |
12861830 | <filename>tests/test_qml.py
"""Tests for `prettyqt` package."""
import pathlib
import pytest
from prettyqt import core, qml
from prettyqt.utils import InvalidParamError
# def test_jsvalue():
# val = qml.JSValue(2)
# val["test"] = 1
# assert val["test"].toInt() == 1
# assert "test" in val
# assert val.get_value() == 2
def test_jsengine():
    """Smoke test: JSEngine construction, extension install and eval."""
    js_engine = qml.JSEngine()
    js_engine.install_extensions("translation")
    js_engine.eval("")
def test_qmlengine():
    """Object-ownership round trip plus plugin/import path accessors."""
    qml_engine = qml.QmlEngine()
    owned = core.Object()
    qml_engine.set_object_ownership(owned, "javascript")
    with pytest.raises(InvalidParamError):
        qml_engine.set_object_ownership(owned, "test")
    assert qml_engine.get_object_ownership(owned) == "javascript"
    qml_engine.add_plugin_path("")
    qml_engine.add_import_path("")
    qml_engine.get_plugin_paths()
    qml_engine.get_import_paths()
def test_qmlapplicationengine(qtlog):
    """Iterate the engine and load a QML snippet with Qt logging silenced."""
    with qtlog.disabled():
        app_engine = qml.QmlApplicationEngine()
        for _child in app_engine:
            pass
        qml_path = pathlib.Path.cwd() / "tests" / "qmltest.qml"
        app_engine.load_data(qml_path.read_text())
def test_qmlcomponent():
    """A freshly created component reports the 'null' status."""
    component = qml.QmlComponent()
    assert component.get_status() == "null"
    # component.load_url("", mode="asynchronous")
    component.get_url()
def test_jsvalue():
    """JSValue wrapping, array item protocol, and from_object conversions."""
    value = qml.JSValue(1)
    assert value.get_error_type() is None
    assert value.get_value() == 1
    repr(value)
    js_engine = qml.JSEngine()
    array_value = js_engine.new_array(2)
    array_value["test1"] = 1
    array_value["test2"] = 2
    assert array_value["test1"] == 1
    assert "test2" in array_value
    assert len(array_value) == 2
    del array_value["test2"]
    for _key, _item in array_value:
        pass
    # Conversion smoke tests for several Python object kinds, in order.
    for py_obj in (None, 1, ["test"], dict(a="b")):
        qml.JSValue.from_object(py_obj, js_engine)
| StarcoderdataPython |
3483005 | import matplotlib
matplotlib.use('agg') # now it works via ssh connection
import os
import mne
import sys
import glob
import pickle
sys.path.append('/home/dvmoors1/BB/ANALYSIS/DvM')
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from IPython import embed
from beh_analyses.PreProcessing import *
from eeg_analyses.EEG import *
from eeg_analyses.ERP import *
from eeg_analyses.BDM import *
from visuals.visuals import MidpointNormalize
from support.FolderStructure import *
from support.support import *
from stats.nonparametric import *
# subject specific info
sj_info = {'1': {'tracker': (False, '', '', '',0), 'replace':{}},
}
# project specific info
project = 'Linguistic'
part = 'beh'
project_param = []
montage = mne.channels.read_montage(kind='biosemi64')
# THIS IS WHAT IT SHOULD BE
#eog = ['V_up','V_do','H_r','H_l']
#ref = ['Ref_r','Ref_l']
# THIS IS WHAT IT IS
eog = ['Ref_r','Ref_l','V_up','V_do']
ref = ['H_r','H_l']
trigger = dict(neutral=10, positive=20, negative = 30)
t_min = 0.2
t_max = 2
flt_pad = 0.5
eeg_runs = [1] # 3 runs for subject 15 session 2
binary = 61440
# set general plotting parameters
sns.set(font_scale=2.5)
sns.set_style('ticks', {'xtick.major.size': 10, 'ytick.major.size': 10})
class Linguistic(FolderStructure):
    """Preprocessing and analysis pipeline for the Linguistic EEG project.

    Path handling (``FolderTracker``, ``loadData``) is inherited from
    ``FolderStructure``; EEG-specific steps rely on the DvM toolbox classes
    imported at module level (``RawBDF``, ``Epochs``, ``ERP``, ``BDM``).
    """

    def __init__(self):
        # Stateless: everything is parameterised per call.
        pass

    def prepareEEG(self, sj, session, eog, ref, eeg_runs, t_min, t_max, flt_pad, sj_info, trigger, project_param, project_folder, binary, channel_plots, inspect):
        '''
        EEG preprocessing as preregistred @ https://osf.io/b2ndy/register/5771ca429ad5a1020de2872e
        '''

        # set subject specific parameters ('file' renamed so the builtin is
        # not shadowed)
        fname = 'subject_{}_session_{}_'.format(sj, session)
        replace = sj_info[str(sj)]['replace']
        #tracker, ext, t_freq, start_event, shift = sj_info[str(sj)]['tracker']

        # start logging
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                            datefmt='%m-%d %H:%M',
                            filename= self.FolderTracker(extension=['processed', 'info'],
                                filename='preprocess_sj{}_ses{}.log'.format(
                                sj, session), overwrite = False),
                            filemode='w')

        # READ IN RAW DATA, APPLY REREFERENCING AND CHANGE NAMING SCHEME
        EEG = mne.concatenate_raws([RawBDF(os.path.join(project_folder, 'raw', fname + '{}.bdf'.format(run)),
                                           montage=None, preload=True, eog=eog) for run in eeg_runs])

        # temp code to get rid of unneccassary data
        to_remove = ['{}{}'.format(letter,i) for i in range(1,33) for letter in ['C','D','E','F','G','H']]
        to_remove += ['GSR1','GSR2','Erg1','Erg2','Resp','Temp','Plet']
        for i, elec in enumerate(to_remove):
            # These labels clash with real 64-channel names, so the BioSemi
            # export suffixes them with '-1'.
            if elec in ['F1','F2','F3','F4','F5','F6','F7','F8','C1','C2','C3','C4','C5','C6']:
                to_remove[i] += '-1'
        EEG.drop_channels(to_remove)

        #EEG.replaceChannel(sj, session, replace)
        EEG.reReference(ref_channels=ref, vEOG=eog[
                        :2], hEOG=eog[2:], changevoltage=True, to_remove = ['EXG7','EXG8'])
        EEG.setMontage(montage='biosemi64')

        #FILTER DATA FOR EPOCHING
        EEG.filter(h_freq=None, l_freq=0.1, fir_design='firwin',
                   skip_by_annotation='edge')

        # MATCH BEHAVIOR FILE
        events = EEG.eventSelection(trigger, binary=binary, min_duration=0)
        #beh, missing, events = self.matchBeh(sj, session, events, trigger,
        #                                     headers = project_param)

        # EPOCH DATA
        epochs = Epochs(sj, session, EEG, events, event_id=trigger,
                        tmin=t_min, tmax=t_max, baseline=(None, None), flt_pad = flt_pad)

        # ARTIFACT DETECTION
        #epochs.selectBadChannels(channel_plots = channel_plots, inspect=inspect, RT = None)
        epochs.artifactDetection(inspect=inspect, run = True)

        # INTERPOLATE BADS
        epochs.interpolate_bads(reset_bads=True, mode='accurate')

        # save eeg
        # NOTE(review): the format string has one placeholder but two
        # arguments, so `session` is silently dropped (one 'all' file per
        # subject) - confirm this is intentional.
        epochs.save(self.FolderTracker(extension=[
            'processed'], filename='subject-{}_all-epo.fif'.format(sj, session)), split_size='2GB')

    def checkN400(self, beh, eeg, time = (-0.2, 0.8), elec = ('Fz',)):
        """Quick sanity plot of the N400 ERP per condition.

        Fixes applied: the mutable default ``elec=['Fz']`` became a tuple,
        the figure path now uses ``self.FolderTracker`` instead of the
        module-level ``PO`` instance (a NameError when used as a library),
        and the unused numeric loop label / unused ``eegs`` local are gone.
        """
        eeg.filter(l_freq = None, h_freq = 30)
        s, e = [np.argmin(abs(eeg.times - t)) for t in time]
        elec_idx = [eeg.ch_names.index(el) for el in elec]
        times = eeg.times[s:e]
        # NOTE(review): the original paired labels (10,'negative') etc.,
        # which disagrees with the module-level `trigger` dict
        # (neutral=10, positive=20, negative=30); the label was unused, so we
        # iterate over condition names only - verify the intended mapping.
        for cnd in ('negative', 'neutral', 'positive'):
            # read in condition data
            data = eeg[cnd]._data[:,elec_idx,s:e]
            # do baselining (using functionality from toolbox)
            data = ERP.baselineCorrect(data, times, (-0.2,0))
            # create ERP
            erp = data.mean(axis = (0,1))
            plt.plot(times, erp, label = cnd)

        plt.legend(loc = 'best')
        sns.despine(offset= 0, trim = False)
        plt.savefig(self.FolderTracker(['erp','figs'], filename = 'N400-test.pdf'))
        plt.close()
if __name__ == '__main__':
    #os.environ['MKL_NUM_THREADS'] = '5'
    #os.environ['NUMEXP_NUM_THREADS'] = '5'
    #os.environ['OMP_NUM_THREADS'] = '5'

    # Specify project parameters
    project_folder = '/home/dvmoors1/BB/Linguistic'
    os.chdir(project_folder)
    PO = Linguistic()

    # run actual preprocessing
    for sj in [1]:
        # BUGFIX: was a Python-2 print statement (a SyntaxError on Python 3).
        print('starting subject {}'.format(sj))

        # do preprocessing
        #PO.prepareEEG(1, 1, eog, ref, eeg_runs, t_min, t_max, flt_pad, sj_info, trigger, project_param, project_folder, binary, True, True)

        # run decoding
        beh, eeg = PO.loadData(sj, False, beh_file = False)
        # NOTE: embed() drops into an interactive IPython shell here; remove
        # it for unattended batch runs.
        embed()
        PO.checkN400(beh, eeg)
        bdm = BDM(beh, eeg, to_decode = 'condition', nr_folds = 10, method = 'acc', elec_oi = 'all', downsample = 128)
        bdm.Classify(sj, cnds = 'all', cnd_header = 'condition', time = (-0.2, 2), bdm_labels = [10,20],gat_matrix = False)
4984197 | import cv2
import numpy as np
def findSameAngle(points, errorRate=0.2, minR=10):
    """Return True when *points* are roughly equidistant from their centroid.

    Used to confirm that a polygon approximation from ``cv2.approxPolyDP``
    is a regular shape (equilateral triangle / square) rather than noise.

    :param points: array of shape (N, 1, 2) with contour vertices.
    :param errorRate: allowed relative deviation of each radius from the mean.
    :param minR: the mean radius must strictly exceed this to qualify.
    :return: bool
    """
    # Deviation of each vertex from the centroid, shape (N, 1, 2).
    deviations = points - points.mean((0, 1)).reshape(1, 1, 2)
    # Euclidean distance of each vertex to the centroid, shape (N,).
    radii = np.sqrt((deviations ** 2).sum((1, 2)))
    mean_radius = radii.mean()
    if mean_radius <= minR:
        # Too small to be a real marker.
        return False
    # Every radius must lie within errorRate of the mean; this vectorised
    # check replaces the original per-element Python loop.
    return bool(np.all(np.abs(radii / mean_radius - 1) <= errorRate))
cap = cv2.VideoCapture('video2.mp4')
# (height, width, channels) - note HEIGHT is stored first.
frame_size = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
              int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), 1)

# HSV ranges for the red and blue markers.
lower_red = np.array([0, 50, 50])
upper_red = np.array([20, 255, 255])
lower_blue = np.array([100, 150, 0], np.uint8)
upper_blue = np.array([140, 255, 255], np.uint8)

kernel = np.ones((4, 4), np.uint8)

while True:
    retval, frame_out = cap.read()  # capture one frame
    # BUGFIX: cap.read() returns (bool, frame); at end-of-stream retval is
    # False (never None), so the old `retval == None` test could not fire and
    # the loop crashed on the None frame.
    if not retval:
        break

    # Work on the top 3/4 of the frame; numpy slicing yields a view, so
    # drawing on `frame` also marks `frame_out`.
    frame = frame_out[0:frame_size[0] * 3 // 4, :]
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    mask = cv2.inRange(hsv, lower_red, upper_red)
    mask1 = cv2.inRange(hsv, lower_blue, upper_blue)
    dilation = cv2.dilate(mask, kernel, iterations=1)
    dilation1 = cv2.dilate(mask1, kernel, iterations=1)

    contours, _ = cv2.findContours(
        dilation, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours1, _ = cv2.findContours(
        dilation1, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    # Red contours that simplify to 3 equidistant vertices: triangles.
    for cnt in contours:
        approx = cv2.approxPolyDP(
            cnt, 0.07 * cv2.arcLength(cnt, False), True)
        if len(approx) == 3:
            if findSameAngle(approx):
                approx.shape = [1, 3, 2]
                cv2.polylines(frame, approx, True, (0, 0, 255), 2, cv2.LINE_4)

    # Blue contours that simplify to 4 equidistant vertices: squares.
    for cnt in contours1:
        approx = cv2.approxPolyDP(
            cnt, 0.07 * cv2.arcLength(cnt, False), True)
        if len(approx) == 4:
            if findSameAngle(approx):
                approx.shape = [1, 4, 2]
                cv2.polylines(frame, approx, True, (255, 0, 0), 5, cv2.LINE_4)

    cv2.imshow('frame_out', frame_out)
    key = cv2.waitKey(25)
    if key == 27:  # ESC quits
        break

if cap.isOpened():
    cap.release()
cv2.destroyAllWindows()
| StarcoderdataPython |
11393418 | <filename>Source/Thttil/__init__.py
# MIT License
# Copyright (c) 2019 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" Thttil runtime API module.
"""
import antlr4
from .ThttilLexer import ThttilLexer
from .ThttilParser import ThttilParser
from .ThttilCommand import ThttilCommand
from .ThttilVisitor import ThttilVisitor
from .ThttilTreeParser import ThttilTreeParser
from .ThttilFileStream import ThttilFileStream
from .ThttilStreamBuffer import ThttilStreamBuffer
from .ThttilVariablePool import ThttilVariablePool
from .ThttilErrorHandler import ThttilErrorHandler
from .ThttilTokenRewriter import ThttilTokenRewriter
from .ThttilCommandCollection import ThttilCommandCollection
from .ThttilCommandReturnType import ThttilCommandReturnType
from .ThttilCommandInterpreter import ThttilCommandInterpreter
from .ThttilRuntimeErrorHandler import ThttilRuntimeErrorHandler | StarcoderdataPython |
325313 | <filename>cloudentries/common/lifecycles/security_group.py
# Copyright (c) 2021 Qianyun, Inc. All rights reserved.
from abstract_plugin.platforms.common.base import CommonResource
class CommonSecurityGroup(CommonResource):
    """Lifecycle placeholder for security groups.

    No security-group-specific behaviour yet; everything is inherited from
    CommonResource.
    """
    pass
| StarcoderdataPython |
9788065 | <reponame>GarrickHe/sonic-mgmt
"""This module provides ptfadapter fixture to be used by tests to send/receive traffic via PTF ports"""
import pytest
from ptfadapter import PtfTestAdapter
from ansible_host import AnsibleHost
DEFAULT_PTF_NN_PORT = 10900
DEFAULT_DEVICE_NUM = 0
ETH_PFX = 'eth'


def get_ifaces(netdev_output):
    """ parse /proc/net/dev content

    :param netdev_output: content of /proc/net/dev
    :return: interface names list (front-panel 'eth*' interfaces only)
    """
    # First field before ':' is the interface name; lines without a colon
    # are header rows and are dropped.
    candidates = (
        line.split(':')[0].strip()
        for line in netdev_output.split('\n')
        if ':' in line
    )
    # Keep only front-panel interfaces (name contains the 'eth' prefix).
    return [name for name in candidates if ETH_PFX in name]
@pytest.fixture(scope='module')
def ptfadapter(ptfhost, testbed):
    """return ptf test adapter object.

    The fixture is module scope, because usually there is not need to
    restart PTF nn agent and reinitialize data plane thread on every
    test class or test function/method. Session scope should also be Ok,
    however if something goes really wrong in one test module it is safer
    to restart PTF before proceeding running other test modules
    """
    # get the eth interfaces from PTF and initialize ifaces_map
    res = ptfhost.command('cat /proc/net/dev')
    ifaces = get_ifaces(res['stdout'])
    # Map each interface's numeric suffix ('ethN' -> N) to its full name.
    ifaces_map = {int(ifname.replace(ETH_PFX, '')): ifname for ifname in ifaces}

    # generate supervisor configuration for ptf_nn_agent
    ptfhost.host.options['variable_manager'].extra_vars = {
        'device_num': DEFAULT_DEVICE_NUM,
        'ptf_nn_port': DEFAULT_PTF_NN_PORT,
        'ifaces_map': ifaces_map,
    }
    ptfhost.template(src='ptfadapter/templates/ptf_nn_agent.conf.ptf.j2',
                     dest='/etc/supervisor/conf.d/ptf_nn_agent.conf')

    # reread configuration and update supervisor (starts/restarts the agent)
    ptfhost.command('supervisorctl reread')
    ptfhost.command('supervisorctl update')

    # Yield inside the context manager so the adapter is torn down after the
    # module's tests complete.
    with PtfTestAdapter(testbed['ptf_ip'], DEFAULT_PTF_NN_PORT, 0, len(ifaces_map)) as adapter:
        yield adapter
| StarcoderdataPython |
11370107 | # Generated by Django 3.0.8 on 2020-07-28 18:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration of the notifications app: creates NotificationSetting.
    # Auto-generated by Django; edit with care.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='NotificationSetting',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('action_type', models.CharField(max_length=255)),
                ('method', models.CharField(choices=[('mail', 'E-mail')], max_length=255)),
                ('enabled', models.BooleanField(default=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notification_settings', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # One setting row per (user, action, delivery method).
                'unique_together': {('user', 'action_type', 'method')},
            },
        ),
    ]
| StarcoderdataPython |
6641888 | <filename>videoflow/utils/system.py
import subprocess
import os
def get_number_of_gpus() -> int:
    '''
    Returns the number of gpus in the system.

    Counts the GPU lines reported by ``nvidia-smi -L``. Returns 0 when
    ``nvidia-smi`` is not installed, and also (fix) when it exits with a
    non-zero status (e.g. driver not loaded), which previously escaped as
    CalledProcessError.
    '''
    try:
        # Decode explicitly instead of relying on str(bytes) ("b'...'").
        output = subprocess.check_output(["nvidia-smi", "-L"]).decode()
        return output.count('UUID')
    except (FileNotFoundError, subprocess.CalledProcessError):
        return 0
def get_system_gpus() -> set:
    '''
    Returns the ids of gpus in the machine as a set of integers
    '''
    # Gpu ids are simply 0..n-1 for the n detected devices.
    return set(range(get_number_of_gpus()))
def get_gpus_available_to_process() -> [int]:
    '''
    Returns the list of ids of the gpus available to the process calling the function.
    It first gets the set of ids of the gpus in the system. Then it gets the set of ids marked as
    available by ``CUDA_VISIBLE_DEVICES``. It returns the intersection of those
    two sets as a list.
    '''
    system_devices = get_system_gpus()
    env_var = os.environ.get('CUDA_VISIBLE_DEVICES', None)
    if env_var is None:
        # Variable unset: every system gpu is visible.
        visible_devices = set(system_devices)
    else:
        visible_devices = set()
        for device in env_var.strip().split(','):
            try:
                visible_devices.add(int(device))
            except ValueError:
                # BUGFIX: was a bare `except:` (which also swallowed
                # SystemExit/KeyboardInterrupt); only malformed entries are
                # skipped now.
                pass
    return list(system_devices & visible_devices)
| StarcoderdataPython |
8072353 | <filename>bazel/docker/bazel_dependencies.bzl
def rules_docker_dependencies():
    """Fetches the bazelbuild/rules_docker repository, pinned at v0.5.1."""
    native.git_repository(
        name = "io_bazel_rules_docker",
        remote = "https://github.com/bazelbuild/rules_docker.git",
        tag = "v0.5.1"
    )
def rules_package_manager_dependencies():
    """Fetches GoogleCloudPlatform/distroless for its package-manager rules.

    NOTE(review): the `commit` value below is a redacted placeholder
    ("<PASSWORD>") left by an anonymisation pass - it must be replaced with
    a real commit hash before this rule can work.
    """
    native.git_repository(
        name = "distroless",
        remote = "https://github.com/GoogleCloudPlatform/distroless.git",
        commit = "<PASSWORD>",
    )
| StarcoderdataPython |
222879 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a parser for WinRAR for Plaso."""
import re
from plaso.events import windows_events
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
__author__ = 'David Nides (<EMAIL>)'
class WinRarHistoryPlugin(interface.KeyPlugin):
  """Windows Registry plugin for parsing WinRAR History keys."""

  # TODO: Create NTUSER.DAT test file with WinRAR data.

  NAME = 'winreg_winrar'
  DESCRIPTION = u'Parser for WinRAR History Registry data.'

  REG_TYPE = 'NTUSER'
  REG_KEYS = [
      u'\\Software\\WinRAR\\DialogEditHistory\\ExtrPath',
      u'\\Software\\WinRAR\\DialogEditHistory\\ArcName',
      u'\\Software\\WinRAR\\ArcHistory']

  # History value names are plain decimal indices ('0', '1', ...); the
  # re.I flag is a no-op on digits but kept as-is.
  _RE_VALUE_NAME = re.compile(r'^[0-9]+$', re.I)

  def GetEntries(
      self, parser_context, key=None, registry_type=None, **unused_kwargs):
    """Collect values under WinRAR ArcHistory and return event for each one.

    Args:
      parser_context: A parser context object (instance of ParserContext).
      key: Optional Registry key (instance of winreg.WinRegKey).
           The default is None.
      registry_type: Optional Registry type string. The default is None.
    """
    for value in key.GetValues():
      # Ignore any value not in the form: '[0-9]+'.
      if not value.name or not self._RE_VALUE_NAME.search(value.name):
        continue

      # Ignore any value that is empty or that does not contain a string.
      if not value.data or not value.DataIsString():
        continue

      # Only the most recent entry ('0') carries the key's timestamp; the
      # older entries have no individual timestamp in the Registry.
      if value.name == '0':
        timestamp = key.last_written_timestamp
      else:
        timestamp = 0

      text_dict = {}
      text_dict[value.name] = value.data

      # TODO: shouldn't this behavior be, put all the values
      # into a single event object with the last written time of the key?
      event_object = windows_events.WindowsRegistryEvent(
          timestamp, key.path, text_dict, offset=key.offset,
          registry_type=registry_type,
          source_append=': WinRAR History')
      parser_context.ProduceEvent(event_object, plugin_name=self.NAME)


# Register the plugin with the Windows Registry parser.
winreg.WinRegistryParser.RegisterPlugin(WinRarHistoryPlugin)
| StarcoderdataPython |
6579258 | from abc import ABCMeta
class ValueObject(metaclass=ABCMeta):
    """Base class for immutable value objects compared by value and type.

    Fixes over the original:
    - ``__metaclass__ = ABCMeta`` was Python-2 syntax and a silent no-op on
      Python 3; the metaclass is now applied via the class keyword.
    - ``__eq__`` checked value equality before the isinstance test, raising
      AttributeError for operands without ``get_value``; it now returns
      NotImplemented for foreign types so ``==`` degrades gracefully.
    - ``__hash__`` is provided (defining ``__eq__`` alone made instances
      unhashable), keyed on class and value.
    """

    def __init__(self, value):
        # Name-mangled attribute keeps the wrapped value effectively private.
        self.__value = value

    def get_value(self):
        """Return the wrapped value."""
        return self.__value

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.get_value() == other.get_value()

    def __hash__(self):
        # Consistent with __eq__: equal class + equal value -> equal hash.
        # Raises TypeError for unhashable wrapped values.
        return hash((self.__class__, self.get_value()))

    def __repr__(self):
        return "ValueObject({})".format(self.get_value())
| StarcoderdataPython |
1826305 | <gh_stars>1-10
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.contrib import messages
from apps.Accused.models import AccusedPerson
from apps.Cells.forms import AddCellForm, EditCellForm
from apps.Cells.models import Cell
from apps.Users.models import Profile
# Create your views here.
@login_required(login_url='Login')
def OfficerCells(request):
    """List the cells created by the logged-in officer, newest first,
    together with an empty AddCellForm for the add dialog."""
    form = AddCellForm()
    username = request.user
    profile = Profile.objects.get(user=username.id)
    cells = Cell.objects.filter(created_by=profile.id).all().order_by('-date_created')
    return render(request, 'Officer Cells.html', {'cells':cells, 'form':form})
def AddCellInfo(request):
    """Create a Cell record from the posted AddCellForm.

    Always redirects back to the officer's cell list, flashing a success or
    error message depending on form validity.
    """
    profile = request.user.profile
    form = AddCellForm()
    if request.method == 'POST':
        form = AddCellForm(request.POST)
        if form.is_valid():
            cell_number = form.cleaned_data['cell_number']
            accused_person = form.cleaned_data['accused_person']
            cell_status = form.cleaned_data['cell_status']
            occupied_on = form.cleaned_data['occupied_on']
            vaccated_on = form.cleaned_data['vaccated_on']
            # The cleaned field holds the AccusedPerson pk; resolve it here.
            accused_person_obj = AccusedPerson.objects.get(pk=int(accused_person))
            new_cell_info = Cell(cell_number = cell_number, accused_person = accused_person_obj, cell_status = cell_status, occupied_on = occupied_on, vaccated_on = vaccated_on, created_by=profile)
            new_cell_info.save()
            messages.success(request, '✅ Cell Record Successfully Created!')
            return redirect('OfficerCells')
        else:
            messages.error(request, '⚠️ Cell Record Was Not Created!')
            return redirect('OfficerCells')
    else:
        # Non-POST requests fall through to the redirect below.
        form = AddCellForm()
    return redirect('OfficerCells')
def EditCellInfo(request, id):
    """Update an existing Cell record from POSTed form fields.

    Cleanup of the original: removed the ``context = {'has_error': False}``
    flag that was never set to True (its error branch was unreachable dead
    code) and the unused ``profile`` local. Behaviour is unchanged - a POST
    saves the record and redirects with a success message; any other method
    just redirects.
    """
    cell = Cell.objects.get(id=id)
    if request.method == 'POST':
        cell.cell_number = request.POST['cell_number']
        # The posted value is the AccusedPerson primary key.
        cell.accused_person = AccusedPerson.objects.get(pk=int(request.POST['accused_person']))
        cell.cell_status = request.POST['cell_status']
        cell.occupied_on = request.POST['occupied_on']
        cell.vaccated_on = request.POST['vaccated_on']
        cell.created_by = request.user.profile
        cell.save()
        messages.success(request, '✅ Cell Record Successfully Updated!')
        return redirect('OfficerCells')
    return redirect('OfficerCells')
def ViewCellDetails(request, id):
    """Render the officer cells page with a single record's details.

    NOTE(review): the template receives only `cell_details` (no `cells`
    listing) - confirm 'Officer Cells.html' handles that context.
    """
    cell_details = Cell.objects.get(id=id)
    return render(request, 'Officer Cells.html', {'cell_details':cell_details})
def DeleteCellInfo(request, id):
    """Delete a cell record and bounce back to the officer's cell list."""
    cell_details = Cell.objects.get(id=id)
    cell_details.delete()
    messages.success(request, '✅ Cell Record Successfully Deleted!')
    return redirect('OfficerCells')
13473 | <reponame>RuanBarretodosSantos/python
# Print the first `valor` terms of the Fibonacci sequence (pt-BR prompts).
cont = 3          # terms 1 and 2 (0 and 1) are printed up front
t1 = 0
t2 = 1
print('-----' * 12)
print('Sequência de Fibonacci')
print('-----' * 12)
valor = int(input('Quantos termos você quer mostrar ? '))
print('~~~~~' * 12)
print(f'{t1} ➙ {t2} ', end='➙ ')
while cont <= valor:
    t3 = t1 + t2
    print(f' {t3}', end=' ➙ ')
    # Slide the window. The original also did `t3 = t1` here, which was dead
    # code (t3 is recomputed before any use) and has been removed.
    t1 = t2
    t2 = t3
    cont += 1
print(' F I M')
| StarcoderdataPython |
1829840 | """
Maps: ComboMaps
===============
We will use an example where we want a 1D layered earth as our model,
but we want to map this to a 2D discretization to do our forward
modeling. We will also assume that we are working in log conductivity
still, so after the transformation we map to conductivity space.
To do this we will introduce the vertical 1D map
(:class:`SimPEG.maps.SurjectVertical1D`), which does the first part of
what we just described. The second part will be done by the
:class:`SimPEG.maps.ExpMap` described above.
.. code-block:: python
:linenos:
M = discretize.TensorMesh([7,5])
v1dMap = maps.SurjectVertical1D(M)
expMap = maps.ExpMap(M)
myMap = expMap * v1dMap
m = np.r_[0.2,1,0.1,2,2.9] # only 5 model parameters!
sig = myMap * m
If you noticed, it was pretty easy to combine maps. What is even cooler
is that the derivatives also are made for you (if everything goes
right). Just to be sure that the derivative is correct, you should
always run the test on the mapping that you create.
"""
import discretize
from SimPEG import maps
import numpy as np
import matplotlib.pyplot as plt
def run(plotIt=True):
    """Build the 5-parameter SurjectVertical1D -> ExpMap example and plot it.

    :param plotIt: when False, only the mapping is computed (no matplotlib
        work); used by documentation builds.
    """
    M = discretize.TensorMesh([7, 5])
    v1dMap = maps.SurjectVertical1D(M)
    expMap = maps.ExpMap(M)
    # Maps compose right-to-left: first surject 1D -> 2D, then exponentiate.
    myMap = expMap * v1dMap
    m = np.r_[0.2, 1, 0.1, 2, 2.9]  # only 5 model parameters!
    sig = myMap * m

    if not plotIt:
        return

    figs, axs = plt.subplots(1, 2)
    axs[0].plot(m, M.vectorCCy, "b-o")
    axs[0].set_title("Model")
    axs[0].set_ylabel("Depth, y")
    axs[0].set_xlabel("Value, $m_i$")
    axs[0].set_xlim(0, 3)
    axs[0].set_ylim(0, 1)
    clbar = plt.colorbar(
        M.plotImage(sig, ax=axs[1], grid=True, gridOpts=dict(color="grey"))[0]
    )
    axs[1].set_title("Physical Property")
    axs[1].set_ylabel("Depth, y")
    # NOTE(review): non-raw string relies on unknown escapes ("\s", "\e",
    # "\m") passing through; a raw string would silence the warning.
    clbar.set_label("$\sigma = \exp(\mathbf{P}m)$")
    plt.tight_layout()
if __name__ == "__main__":
run()
plt.show()
| StarcoderdataPython |
6622532 | <filename>qa/tasks/util/rados.py
import logging
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def rados(ctx, remote, cmd, wait=True, check_status=False):
    """Run the ``rados`` CLI on *remote*, wrapped in the usual teuthology
    ulimit/coverage helpers.

    :param ctx: teuthology run context (used to locate the test dir).
    :param remote: remote host to execute on.
    :param cmd: list of extra arguments appended to the ``rados`` command.
    :param wait: when True, block and return the exit status; otherwise
        return the running process handle.
    :param check_status: forwarded to remote.run (raise on non-zero exit).
    """
    testdir = teuthology.get_testdir(ctx)
    log.info("rados %s" % ' '.join(cmd))
    pre = [
        'adjust-ulimits',
        'ceph-coverage',
        '{tdir}/archive/coverage'.format(tdir=testdir),
        'rados',
    ]
    pre.extend(cmd)
    proc = remote.run(
        args=pre,
        check_status=check_status,
        wait=wait,
    )
    if wait:
        return proc.exitstatus
    return proc
def create_ec_pool(remote, name, profile_name, pgnum, profile={}, cluster_name="ceph", application=None):
    """Register an erasure-code profile, then create an EC pool using it.

    NOTE(review): the mutable default ``profile={}`` is only safe because the
    dict is never mutated here (it is read by cmd_erasure_code_profile).
    """
    remote.run(args=['sudo', 'ceph'] +
               cmd_erasure_code_profile(profile_name, profile) + ['--cluster', cluster_name])
    remote.run(args=[
        'sudo', 'ceph', 'osd', 'pool', 'create', name,
        str(pgnum), str(pgnum), 'erasure', profile_name, '--cluster', cluster_name
    ])
    if application:
        remote.run(args=[
            'sudo', 'ceph', 'osd', 'pool', 'application', 'enable', name, application, '--cluster', cluster_name
        ], check_status=False)  # may fail as EINVAL when run in jewel upgrade test
def create_replicated_pool(remote, name, pgnum, cluster_name="ceph", application=None):
    """Create a replicated pool and optionally tag it with an application."""
    remote.run(args=[
        'sudo', 'ceph', 'osd', 'pool', 'create', name, str(pgnum), str(pgnum), '--cluster', cluster_name
    ])
    if application:
        remote.run(args=[
            'sudo', 'ceph', 'osd', 'pool', 'application', 'enable', name, application, '--cluster', cluster_name
        ], check_status=False)
def create_cache_pool(remote, base_name, cache_name, pgnum, size, cluster_name="ceph"):
    """Create *cache_name* and attach it as a cache tier of *base_name*
    with the given byte size limit."""
    remote.run(args=[
        'sudo', 'ceph', 'osd', 'pool', 'create', cache_name, str(pgnum), '--cluster', cluster_name
    ])
    remote.run(args=[
        'sudo', 'ceph', 'osd', 'tier', 'add-cache', base_name, cache_name,
        str(size), '--cluster', cluster_name
    ])
def cmd_erasure_code_profile(profile_name, profile=None):
    """
    Return the shell command to run to create the erasure code profile
    described by the profile parameter.

    :param profile_name: a string matching [A-Za-z0-9-_.]+
    :param profile: a map whose semantic depends on the erasure code plugin
    :returns: a shell command as an array suitable for Remote.run

    If profile is None or {}, it is replaced with

      { 'k': '2', 'm': '1', 'crush-failure-domain': 'osd'}

    for backward compatibility. In previous versions of teuthology,
    these values were hardcoded as function arguments and some yaml
    files were designed with these implicit values. The teuthology
    code should not know anything about the erasure code profile
    content or semantic. The valid values and parameters are outside
    its scope.
    """
    # BUGFIX: the default used to be the mutable literal `{}`; None avoids
    # the shared-mutable-default pitfall while keeping the same behaviour
    # for callers that pass nothing or an empty dict.
    if not profile:
        profile = {
            'k': '2',
            'm': '1',
            'crush-failure-domain': 'osd'
        }
    return [
        'osd', 'erasure-code-profile', 'set',
        profile_name
    ] + [str(key) + '=' + str(value) for key, value in profile.items()]
| StarcoderdataPython |
4861215 | import gym
from agent.DQN import DeepQNetwork
import numpy as np
import argparse
import matplotlib.pyplot as plt
def train(RL, env):
    """Run the DQN agent on the environment until 20000 steps past warm-up,
    then plot the recorded Q-values."""
    total_steps = 0
    observation = env.reset()
    while True:
        # if total_steps - MEMORY_SIZE > 8000: env.render()

        action = RL.choose_action(observation)

        # Map the discrete action index onto a continuous torque value.
        f_action = (action-(ACTION_SPACE-1)/2)/((ACTION_SPACE-1)/4)  # convert to [-2 ~ 2] float actions

        observation_, reward, done, info = env.step(np.array([f_action]))

        # Scale the reward down to keep Q-value magnitudes modest.
        reward /= 10

        RL.store_transition(observation, action, reward, observation_)

        if total_steps > RL.batch_size:   # learning
            RL.learn()

        if total_steps - RL.batch_size > 20000:   # stop game
            break

        observation = observation_
        total_steps += 1
    RL.plot_Q_value('Q_value_figure', 'DQN')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--double', default=False)
parser.add_argument('--prioritized', default=False)
parser.add_argument('--dueling', default=False)
args = parser.parse_args()
env = gym.make('Pendulum-v0')
env = env.unwrapped
env.seed(1)
ACTION_SPACE = 11
agent = DeepQNetwork(ACTION_SPACE, 3, use_double_q=args.double,
prioritized=args.prioritized, dueling=args.dueling)
train(agent, env)
| StarcoderdataPython |
5089002 | from enum import IntFlag
from typing import Optional, Union
from .operation import Operation
from .utils import check_ed25519_public_key
from .. import xdr as stellar_xdr
from ..keypair import Keypair
from ..signer import Signer
from ..strkey import StrKey
__all__ = ["AuthorizationFlag", "SetOptions"]
class AuthorizationFlag(IntFlag):
    """Indicates which flags to set. For details about the flags,
    please refer to the `accounts doc <https://www.stellar.org/developers/guides/concepts/accounts.html>`_.

    The bit mask integer adds onto the existing flags of the account.
    """

    # Single-bit members; combine with `|` to set several flags at once.
    AUTHORIZATION_REQUIRED = 1
    AUTHORIZATION_REVOCABLE = 2
    AUTHORIZATION_IMMUTABLE = 4
    AUTHORIZATION_CLAWBACK_ENABLED = 8
class SetOptions(Operation):
"""The :class:`SetOptions` object, which represents a SetOptions operation
on Stellar's network.
This operation sets the options for an account.
For more information on the signing options, please refer to the `multi-sig
doc <https://www.stellar.org/developers/guides/concepts/multi-sig.html>`_.
When updating signers or other thresholds, the threshold of this operation
is high.
Threshold: Medium or High
:param inflation_dest: Account of the inflation destination.
:param clear_flags: Indicates which flags to clear. For details about the flags,
please refer to the `accounts doc <https://www.stellar.org/developers/guides/concepts/accounts.html>`_.
The `bit mask <https://en.wikipedia.org/wiki/Bit_field>`_ integer subtracts from the existing flags of the account.
This allows for setting specific bits without knowledge of existing flags, you can also use
:class:`stellar_sdk.operation.set_options.AuthorizationFlag`
- AUTHORIZATION_REQUIRED = 1
- AUTHORIZATION_REVOCABLE = 2
- AUTHORIZATION_IMMUTABLE = 4
- AUTHORIZATION_CLAWBACK_ENABLED = 8
:param set_flags: Indicates which flags to set. For details about the flags,
please refer to the `accounts doc <https://www.stellar.org/developers/guides/concepts/accounts.html>`_.
The bit mask integer adds onto the existing flags of the account.
This allows for setting specific bits without knowledge of existing flags, you can also use
:class:`stellar_sdk.operation.set_options.AuthorizationFlag`
- AUTHORIZATION_REQUIRED = 1
- AUTHORIZATION_REVOCABLE = 2
- AUTHORIZATION_IMMUTABLE = 4
- AUTHORIZATION_CLAWBACK_ENABLED = 8
:param master_weight: A number from 0-255 (inclusive) representing the weight of the master key.
If the weight of the master key is updated to 0, it is effectively disabled.
:param low_threshold: A number from 0-255 (inclusive) representing the threshold this account sets on all
operations it performs that have `a low threshold <https://www.stellar.org/developers/guides/concepts/multi-sig.html>`_.
:param med_threshold: A number from 0-255 (inclusive) representing the threshold this account sets on all
operations it performs that have `a medium threshold <https://www.stellar.org/developers/guides/concepts/multi-sig.html>`_.
:param high_threshold: A number from 0-255 (inclusive) representing the threshold this account sets on all
operations it performs that have `a high threshold <https://www.stellar.org/developers/guides/concepts/multi-sig.html>`_.
:param home_domain: sets the home domain used for
reverse `federation <https://www.stellar.org/developers/guides/concepts/federation.html>`_ lookup.
:param signer: Add, update, or remove a signer from the account.
:param source: The source account (defaults to transaction source).
"""
_XDR_OPERATION_TYPE: stellar_xdr.OperationType = (
stellar_xdr.OperationType.SET_OPTIONS
)
    def __init__(
        self,
        inflation_dest: str = None,
        clear_flags: Union[int, AuthorizationFlag] = None,
        set_flags: Union[int, AuthorizationFlag] = None,
        master_weight: int = None,
        low_threshold: int = None,
        med_threshold: int = None,
        high_threshold: int = None,
        signer: Signer = None,
        home_domain: str = None,
        source: str = None,
    ) -> None:
        # Parameters are documented on the class docstring; every option left
        # as None is simply omitted from the resulting operation.
        super().__init__(source)
        if inflation_dest is not None:
            # Raises if the address is not a valid ed25519 public key.
            check_ed25519_public_key(inflation_dest)

        # Accept either raw ints or AuthorizationFlag members for the flags
        # and normalise to plain ints.
        if isinstance(set_flags, AuthorizationFlag):
            set_flags = set_flags.value
        if isinstance(clear_flags, AuthorizationFlag):
            clear_flags = clear_flags.value

        self.inflation_dest = inflation_dest
        self.clear_flags: int = clear_flags  # type: ignore[assignment]
        self.set_flags: int = set_flags  # type: ignore[assignment]
        self.master_weight = master_weight
        self.low_threshold = low_threshold
        self.med_threshold = med_threshold
        self.high_threshold = high_threshold
        self.home_domain = home_domain
        self.signer: Optional[Signer] = signer
    def _to_operation_body(self) -> stellar_xdr.OperationBody:
        """Assemble the XDR ``SetOptionsOp`` body.

        Every option that is ``None`` stays ``None`` in the XDR struct, so
        the corresponding account setting is left untouched on-chain.
        """
        # Wrap each set option in its XDR type; leave unset options as None.
        inflation_dest = (
            Keypair.from_public_key(self.inflation_dest).xdr_account_id()
            if self.inflation_dest is not None
            else None
        )
        home_domain = (
            stellar_xdr.String32(bytes(self.home_domain, encoding="utf-8"))
            if self.home_domain is not None
            else None
        )
        clear_flags = (
            None if self.clear_flags is None else stellar_xdr.Uint32(self.clear_flags)
        )
        set_flags = (
            None if self.set_flags is None else stellar_xdr.Uint32(self.set_flags)
        )
        master_weight = (
            None
            if self.master_weight is None
            else stellar_xdr.Uint32(self.master_weight)
        )
        low_threshold = (
            None
            if self.low_threshold is None
            else stellar_xdr.Uint32(self.low_threshold)
        )
        med_threshold = (
            None
            if self.med_threshold is None
            else stellar_xdr.Uint32(self.med_threshold)
        )
        high_threshold = (
            None
            if self.high_threshold is None
            else stellar_xdr.Uint32(self.high_threshold)
        )
        signer = None if self.signer is None else self.signer.to_xdr_object()
        set_options_op = stellar_xdr.SetOptionsOp(
            inflation_dest,
            clear_flags,
            set_flags,
            master_weight,
            low_threshold,
            med_threshold,
            high_threshold,
            home_domain,
            signer,
        )
        body = stellar_xdr.OperationBody(
            type=self._XDR_OPERATION_TYPE, set_options_op=set_options_op
        )
        return body
    @classmethod
    def from_xdr_object(cls, xdr_object) -> "SetOptions":
        """Creates a :class:`SetOptions` object from an XDR Operation
        object.
        """
        source = Operation.get_source_from_xdr_obj(xdr_object)

        # inflation_dest is stored as an ed25519 account id; re-encode it
        # into the "G..." strkey form when present.
        inflation_dest = None
        if xdr_object.body.set_options_op.inflation_dest:
            inflation_dest = StrKey.encode_ed25519_public_key(
                xdr_object.body.set_options_op.inflation_dest.account_id.ed25519.uint256
            )

        # Pull out the optional XDR wrappers...
        clear_flags_xdr = xdr_object.body.set_options_op.clear_flags
        set_flags_xdr = xdr_object.body.set_options_op.set_flags
        master_weight_xdr = xdr_object.body.set_options_op.master_weight
        low_threshold_xdr = xdr_object.body.set_options_op.low_threshold
        med_threshold_xdr = xdr_object.body.set_options_op.med_threshold
        high_threshold_xdr = xdr_object.body.set_options_op.high_threshold
        home_domain_xdr = xdr_object.body.set_options_op.home_domain
        signer_xdr_object = xdr_object.body.set_options_op.signer

        # ...and unwrap each one to a plain Python value, preserving None
        # for options that were absent in the XDR.
        clear_flags = None if clear_flags_xdr is None else clear_flags_xdr.uint32
        set_flags = None if set_flags_xdr is None else set_flags_xdr.uint32
        master_weight = None if master_weight_xdr is None else master_weight_xdr.uint32
        low_threshold = None if low_threshold_xdr is None else low_threshold_xdr.uint32
        med_threshold = None if med_threshold_xdr is None else med_threshold_xdr.uint32
        high_threshold = (
            None if high_threshold_xdr is None else high_threshold_xdr.uint32
        )
        home_domain = None if home_domain_xdr is None else home_domain_xdr.string32
        signer = (
            None
            if signer_xdr_object is None
            else Signer.from_xdr_object(signer_xdr_object)
        )
        if home_domain is not None:
            home_domain = home_domain.decode("utf-8")

        op = cls(
            inflation_dest=inflation_dest,
            clear_flags=clear_flags,
            set_flags=set_flags,
            master_weight=master_weight,
            low_threshold=low_threshold,
            med_threshold=med_threshold,
            high_threshold=high_threshold,
            home_domain=home_domain,
            signer=signer,
            source=source,
        )
        # Preserve the muxed source account exactly as it appeared in XDR.
        op._source_muxed = Operation.get_source_muxed_from_xdr_obj(xdr_object)
        return op
def __str__(self):
return (
f"<SetOptions [inflation_dest={self.inflation_dest}, "
f"clear_flags={self.clear_flags}, "
f"set_flags={self.set_flags}, "
f"master_weight={self.master_weight}, "
f"low_threshold={self.low_threshold}, "
f"med_threshold={self.med_threshold}, "
f"high_threshold={self.high_threshold}, "
f"signer={self.signer}, "
f"home_domain={self.home_domain}, "
f"source={self.source}]>"
)
| StarcoderdataPython |
3313416 | """
Basic Character Redaction
"""
import sys
try:
import pandas as pd
except ImportError:
pd = None
from gretel_client.transformers import (
RedactWithCharConfig,
DataPath,
DataTransformPipeline,
StringMask,
)
# Full-field redaction: the default config redacts with "X" (see assert below);
# the second config overrides the redaction character to "Y".
xf = [RedactWithCharConfig()]
xf2 = [RedactWithCharConfig(char="Y")]
paths = [
    DataPath(input="foo", xforms=xf),
    DataPath(input="bar", xforms=xf2),
    DataPath(input="*"),
]
pipe = DataTransformPipeline(paths)
rec = {"foo": "hello", "bar": "there", "baz": "world"}
out = pipe.transform_record(rec)
# "baz" matches only the wildcard path, so it passes through unredacted.
assert out == {"foo": "XXXXX", "bar": "YYYYY", "baz": "world"}
print(out)
# Now let's do partial redactions
mask_1 = StringMask(
    start_pos=3
)  # let's only keep the first few chars of an email address
mask_2 = StringMask(
    mask_after="@"
)  # let's only mask the domain part of the email address
xf_1 = [RedactWithCharConfig(mask=[mask_1])]
xf_2 = [RedactWithCharConfig(mask=[mask_2])]
paths = [
    # NOTE(review): xf_1/xf_2 are already lists, so `xforms=[xf_1]` produces a
    # nested list, unlike `xforms=xf` above -- confirm DataPath accepts both.
    DataPath(input="email", xforms=[xf_1]),
    DataPath(input="email_2", xforms=[xf_2]),
    DataPath(input="*"),
]
pipe = DataTransformPipeline(paths)
rec = {
    "email": "<EMAIL>",
    "email_2": "<EMAIL>",
}
out = pipe.transform_record(rec)
print(out)
# NOTE(review): expected values below contain anonymization placeholders
# ("<EMAIL>"); they reflect the original pre-anonymization fixture data.
assert out == {
    "email": "monXXXXXXX.XXXXX@XXXXXXXXXXX.XXX",
    "email_2": "<EMAIL>XXX.XXX",
}
####################
# DataFrame Version
####################
if pd is None:
    print("Skipping DataFrame version, Pandas not installed!")
    sys.exit(1)
records = [
    {"name": "Homer", "id": 1234, "email": "<EMAIL>"},
    {"name": "Monty", "id": 5678, "email_2": "<EMAIL>"},
]
df = pd.DataFrame(records)
# transform_df applies the same DataPath pipeline column-wise; columns absent
# from a record come back as None.
transformed_df = pipe.transform_df(df)
assert transformed_df.to_dict(orient="records") == [
    {
        "email": "homXX.X.XXXXXXX@XXXXXXXXXXX.XXX",
        "email_2": None,
        "id": 1234,
        "name": "Homer",
    },
    {
        "email": None,
        "email_2": "mongtomery.burns@XXXXXXXXXXX.XXX",
        "id": 5678,
        "name": "Monty",
    },
]
| StarcoderdataPython |
3304964 | <gh_stars>1-10
import unittest
from bltest import attr
from lace.cache import sc
from lace.serialization import wrl, obj
@attr('missing_assets')
class TestWRL(unittest.TestCase):
    """Round-trip checks for the WRL mesh serializer against known fixtures."""
    def setUp(self):
        # Fixture assets live in S3; sc() resolves them to local cache paths.
        self.test_wrl_url = "s3://bodylabs-korper-assets/is/ps/shared/data/body/korper_testdata/test_wrl.wrl"
        self.test_wrl_path = sc(self.test_wrl_url)
        self.test_obj_url = "s3://bodylabs-korper-assets/is/ps/shared/data/body/korper_testdata/test_box.obj"
        self.test_wrl_converted_path = sc("s3://bodylabs-korper-assets/is/ps/shared/data/body/korper_testdata/test_wrl_converted.obj")
    def test_loads_from_open_file_using_serializer(self):
        with open(self.test_wrl_path) as wrl_file, open(self.test_wrl_converted_path) as obj_file:
            loaded = wrl.load(wrl_file)
            expected = obj.load(obj_file)
        # Vertex and face arrays must match the pre-converted OBJ exactly.
        self.assertTrue((loaded.v == expected.v).all())
        self.assertTrue((loaded.f == expected.f).all())
    def test_loads_unsupported_format_raise_exception(self):
        obj_path = sc(self.test_obj_url)
        with self.assertRaises(wrl.ParseError):
            with open(obj_path) as handle:
                wrl.load(handle)
| StarcoderdataPython |
8005564 | <gh_stars>1-10
from dodo_commands.framework.command_map import get_command_map as _get_command_map
from dodo_commands.framework.command_path import get_command_dirs_from_config
from dodo_commands.framework.container.facets import (
Commands,
Config,
Layers,
i_,
o_,
register,
)
from dodo_commands.framework.get_aliases import get_aliases
from dodo_commands.framework.inferred_commands import (
get_inferred_command_map as _get_inferred_command_map,
)
# COMMANDS
@register(
    i_(Commands, "global_aliases"),
    i_(Layers, "metadata_by_layer_name"),
    i_(Layers, "layer_by_target_path"),
    o_(Commands, "layer_name_by_inferred_command"),
)
def get_inferred_command_map(
    global_aliases, metadata_by_layer_name, layer_by_target_path
):
    """Resolve the mapping from inferred command names to layer names."""
    inferred_map = _get_inferred_command_map(
        global_aliases,
        metadata_by_layer_name,
        layer_by_target_path,
    )
    return {"layer_name_by_inferred_command": inferred_map}
# COMMANDS
@register(
    i_(Config, "config"),
    o_(Commands, "aliases_from_config"),
)
def get_aliases_from_config(config):
    """Collect the command aliases declared in the configuration."""
    return {"aliases_from_config": get_aliases(config)}
# COMMANDS
@register(
    i_(Config, "config"), o_(Commands, "command_dirs"), o_(Commands, "command_map")
)
def get_command_map(config):
    """Derive the command directories from config and build the command map."""
    dirs = get_command_dirs_from_config(config)
    return {"command_dirs": dirs, "command_map": _get_command_map(dirs)}
| StarcoderdataPython |
11397672 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RDesolve(RPackage):
    """Functions that solve initial value problems of a system of first-order
    ordinary differential equations ('ODE'), of partial differential
    equations ('PDE'), of differential algebraic equations ('DAE'), and of
    delay differential equations."""
    # Spack package directives; version order matters (newest listed first).
    homepage = "https://cloud.r-project.org/package=deSolve"
    url      = "https://cloud.r-project.org/src/contrib/deSolve_1.20.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/deSolve"
    # 1.24/1.21 pin release tarballs by sha256; 1.20 uses a legacy
    # positional checksum (32 hex chars -- md5-style).
    version('1.24', sha256='3aa52c822abb0348a904d5bbe738fcea2b2ba858caab9f2831125d07f0d57b42')
    version('1.21', sha256='45c372d458fe4c7c11943d4c409517849b1be6782dc05bd9a74b066e67250c63')
    version('1.20', '85c6a2d8568944ae8eef27ac7c35fb25')
    # Requires R >= 2.15.0 both to build and at run time.
    depends_on('r@2.15.0:', type=('build', 'run'))
| StarcoderdataPython |
4919421 | <filename>Greay_Atom_Courses/code.py
# --------------
# Code starts here
# Instructions
# NOTE(review): the original student names were replaced by the literal
# placeholder '<NAME>' during dataset anonymization; list/dict semantics below
# (duplicate dict keys, single-token split) are broken by that substitution.
# Create a 'class_1' list and pass the elements '<NAME>','<NAME>','<NAME>','<NAME>'.
class_1 = ['<NAME>','<NAME>','<NAME>','<NAME>']
# Create a 'class_2' list and pass the elements '<NAME>','<NAME>','<NAME>'.
class_2 = ['<NAME>','<NAME>','<NAME>']
# Concatenate the 'class_1' and 'class_2' list. Store the values in a 'new_class' variable.
new_class = class_1 + class_2
print(new_class)
# Add new element '<NAME>' in the 'new_class' list.
new_class.append('<NAME>')
# Print 'new_class' to see the updated list.
print(new_class)
# Remove the first occurrence of '<NAME>' from the 'new_class' list.
new_class.remove('<NAME>')
# Print 'new_class' to see the updated list.
print(new_class)
# Code ends here
# --------------
# Code starts here
courses = {"Math": 65, "English": 70, "History": 80, "French": 70, "Science": 60}
total = courses["Math"] + courses["English"] + courses["History"] + courses["French"] + courses["Science"]
print(total)
addition = 500
percentage = total / addition *100
print(percentage)
print("percentage scored by Geoffrey Hinton is", percentage)
# Code ends here
# --------------
# Code starts here
# NOTE(review): with anonymized keys this dict collapses to one entry
# (duplicate '<NAME>' keys); the original had seven distinct student names.
mathematics = {"<NAME>": 78, "<NAME>": 95, "<NAME>": 65, "<NAME>": 50, "<NAME>": 70, "<NAME>": 66, "<NAME>": 75}
topper = max(mathematics, key = mathematics.get)
print(topper)
# Code ends here
# --------------
# Given string
topper = '<NAME>'
# NOTE(review): '<NAME>' contains no space, so split()[1] raises IndexError
# here; the original string was a two-word "First Last" name.
first_name, last_name = topper.split()[0], topper.split()[1]
full_name = last_name + " " + first_name
certificate_name = full_name.upper()
print(certificate_name)
# Code starts here
# Code ends here
| StarcoderdataPython |
9759979 | <filename>3d/marin/marin.py
from math import *
import proteus.MeshTools
from proteus import Domain
from proteus.default_n import *
from proteus.Profiling import logEvent
# Discretization -- input options
# Refinement controls mesh resolution and (below) the fixed time step.
#Refinement=8#4-32 cores
#Refinement=12
Refinement=24
genMesh=True
useOldPETSc=False
useSuperlu=False
spaceOrder = 1
useHex = False
useRBLES = 0.0
useMetrics = 1.0
applyCorrection=True
useVF = 1.0
useOnlyVF = False
redist_Newton = False#True
useRANS = 0 # 0 -- None
            # 1 -- K-Epsilon
            # 2 -- K-Omega
# Input checks: abort early on unsupported discretization options.
# BUGFIX: the original Python 2 statements `print "..." + spaceOrder` raised
# TypeError (str + int/float) on the very error path they were reporting.
# Parenthesized single-argument print with str() works in Python 2 and 3.
if spaceOrder not in [1,2]:
    print("INVALID: spaceOrder " + str(spaceOrder))
    sys.exit()
if useRBLES not in [0.0, 1.0]:
    print("INVALID: useRBLES " + str(useRBLES))
    sys.exit()
if useMetrics not in [0.0, 1.0]:
    print("INVALID: useMetrics")
    sys.exit()
# Discretization
# Select finite-element basis and quadrature order for P1 (spaceOrder 1)
# or P2 (spaceOrder 2) elements, on hexahedra or simplices.
nd = 3
if spaceOrder == 1:
    hFactor=1.0
    if useHex:
        basis=C0_AffineLinearOnCubeWithNodalBasis
        elementQuadrature = CubeGaussQuadrature(nd,2)
        elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,2)
    else:
        basis=C0_AffineLinearOnSimplexWithNodalBasis
        elementQuadrature = SimplexGaussQuadrature(nd,3)
        elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,3)
elif spaceOrder == 2:
    hFactor=0.5
    if useHex:
        basis=C0_AffineLagrangeOnCubeWithNodalBasis
        elementQuadrature = CubeGaussQuadrature(nd,4)
        elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,4)
    else:
        basis=C0_AffineQuadraticOnSimplexWithNodalBasis
        elementQuadrature = SimplexGaussQuadrature(nd,4)
        elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,4)
# Domain and mesh
# Tank of size L with an interior box obstacle (the classic MARIN dam-break
# benchmark geometry); the box is excluded from the fluid domain via `holes`.
nLevels = 1
parallelPartitioningType = proteus.MeshTools.MeshParallelPartitioningTypes.node
nLayersOfOverlapForParallel = 0
use_petsc4py=True#False
if useHex:
    # NOTE(review): `hex=True` shadows the builtin hex() in module scope.
    hex=True
    comm=Comm.get()
    if comm.isMaster():
        size = numpy.array([[0.520,0.510 ,0.520],
                            [0.330,0.335833,0.330],
                            [0.320,0.325 ,0.000]])/float(Refinement)
        numpy.savetxt('size.mesh', size)
        failed = os.system("../../scripts/marinHexMesh")
    domain = Domain.MeshHexDomain("marinHex")
else:
    L = [3.22,1.0,1.0]
    box_L = [0.161,0.403,0.161]
    box_xy = [2.3955,0.2985]
    #he = L[0]/float(6.5*Refinement)
    he = L[0]/64.0
    he*=0.5#256
    boundaries=['left','right','bottom','top','front','back','box_left','box_right','box_top','box_front','box_back',]
    # Tag ids start at 1 (0 is reserved for interior facets).
    boundaryTags=dict([(key,i+1) for (i,key) in enumerate(boundaries)])
    bt = boundaryTags
    # One hole point inside the box so tetgen removes the box interior.
    holes = [[0.5*box_L[0]+box_xy[0],0.5*box_L[1]+box_xy[1],0.5*box_L[2]]]
    # Vertices 0-7: tank corners; 8-15: box corners (bottom then top).
    vertices=[[0.0,0.0,0.0],#0
              [L[0],0.0,0.0],#1
              [L[0],L[1],0.0],#2
              [0.0,L[1],0.0],#3
              [0.0,0.0,L[2]],#4
              [L[0],0.0,L[2]],#5
              [L[0],L[1],L[2]],#6
              [0.0,L[1],L[2]],#7
              [box_xy[0],box_xy[1],0.0],#8
              [box_xy[0]+box_L[0],box_xy[1],0.0],#9
              [box_xy[0]+box_L[0],box_xy[1]+box_L[1],0.0],#10
              [box_xy[0],box_xy[1]+box_L[1],0.0],#11
              [box_xy[0],box_xy[1],box_L[2]],#12
              [box_xy[0]+box_L[0],box_xy[1],box_L[2]],#13
              [box_xy[0]+box_L[0],box_xy[1]+box_L[1],box_L[2]],#14
              [box_xy[0],box_xy[1]+box_L[1],box_L[2]]]#15
    vertexFlags=[boundaryTags['left'],
                 boundaryTags['right'],
                 boundaryTags['right'],
                 boundaryTags['left'],
                 boundaryTags['left'],
                 boundaryTags['right'],
                 boundaryTags['right'],
                 boundaryTags['left'],
                 boundaryTags['box_left'],
                 boundaryTags['box_left'],
                 boundaryTags['box_left'],
                 boundaryTags['box_left'],
                 boundaryTags['box_left'],
                 boundaryTags['box_left'],
                 boundaryTags['box_left'],
                 boundaryTags['box_left']]
    # First facet is the tank floor with the box footprint cut out.
    facets=[[[0,1,2,3],[8,9,10,11]],
            [[0,1,5,4]],
            [[1,2,6,5]],
            [[2,3,7,6]],
            [[3,0,4,7]],
            [[4,5,6,7]],
            [[8,9,13,12]],
            [[9,10,14,13]],
            [[10,11,15,14]],
            [[11,8,12,15]],
            [[12,13,14,15]]]
    facetFlags=[boundaryTags['bottom'],
                boundaryTags['front'],
                boundaryTags['right'],
                boundaryTags['back'],
                boundaryTags['left'],
                boundaryTags['top'],
                boundaryTags['box_front'],
                boundaryTags['box_right'],
                boundaryTags['box_back'],
                boundaryTags['box_left'],
                boundaryTags['box_top']]
    domain = Domain.PiecewiseLinearComplexDomain(vertices=vertices,
                                                 vertexFlags=vertexFlags,
                                                 facets=facets,
                                                 facetFlags=facetFlags,
                                                 holes=holes)
    #go ahead and add a boundary tags member
    domain.boundaryTags = boundaryTags
    domain.writePoly("mesh")
    domain.writePLY("mesh")
    domain.writeAsymptote("mesh")
    # tetgen options: max tetrahedron volume scales with he**3.
    triangleOptions="VApq1.25q12ena%e" % ((he**3)/6.0,)
logEvent("""Mesh generated using: tetgen -%s %s""" % (triangleOptions,domain.polyfile+".poly"))
# Time stepping
T=6.00
dt_init =0.001
dt_fixed = 0.1/Refinement
nDTout = int(round(T/dt_fixed))
# Numerical parameters
# NOTE(review): the two useMetrics branches below differ only in
# ls_sc_beta and vof_sc_beta (1.5 vs 1.0); everything else is duplicated.
ns_forceStrongDirichlet = False#True
if useMetrics:
    ns_shockCapturingFactor = 0.9
    ns_lag_shockCapturing = True
    ns_lag_subgridError = True
    ls_shockCapturingFactor = 0.9
    ls_lag_shockCapturing = True
    ls_sc_uref = 1.0
    ls_sc_beta = 1.5
    vof_shockCapturingFactor = 0.9
    vof_lag_shockCapturing = True
    vof_sc_uref = 1.0
    vof_sc_beta = 1.5
    rd_shockCapturingFactor = 0.9
    rd_lag_shockCapturing = False
    epsFact_density = 1.5
    epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = epsFact_density
    epsFact_redistance = 0.33
    epsFact_consrv_diffusion = 10.0
    redist_Newton = False
else:
    ns_shockCapturingFactor = 0.9
    ns_lag_shockCapturing = True
    ns_lag_subgridError = True
    ls_shockCapturingFactor = 0.9
    ls_lag_shockCapturing = True
    ls_sc_uref = 1.0
    ls_sc_beta = 1.0
    vof_shockCapturingFactor = 0.9
    vof_lag_shockCapturing = True
    vof_sc_uref = 1.0
    vof_sc_beta = 1.0
    rd_shockCapturingFactor = 0.9
    rd_lag_shockCapturing = False
    epsFact_density = 1.5
    epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = epsFact_density
    epsFact_redistance = 0.33
    epsFact_consrv_diffusion = 10.0
    redist_Newton = False
kappa_shockCapturingFactor = 0.9
kappa_lag_shockCapturing = True#False
kappa_sc_uref = 1.0
kappa_sc_beta = 1.0
dissipation_shockCapturingFactor = 0.9
dissipation_lag_shockCapturing = True#False
dissipation_sc_uref = 1.0
dissipation_sc_beta = 1.0
# Nonlinear solver absolute tolerances, scaled by the mesh size `he`.
ns_nl_atol_res = max(1.0e-8,0.1*he**2/2.0)
vof_nl_atol_res = max(1.0e-8,0.1*he**2/2.0)
ls_nl_atol_res = max(1.0e-8,0.1*he**2/2.0)
rd_nl_atol_res = max(1.0e-8,0.1*he)
mcorr_nl_atol_res = max(1.0e-8,0.01*he**2/2.0)
kappa_nl_atol_res = max(1.0e-8,0.01*he**2/2.0)
dissipation_nl_atol_res = max(1.0e-8,0.01*he**2/2.0)
#turbulence
# ns_closure: 1 -- classic Smagorinsky, 2 -- dynamic Smagorinsky,
#             3 -- k-epsilon, 4 -- k-omega
ns_closure=2
if useRANS == 1:
    ns_closure = 3
elif useRANS == 2:
    # BUGFIX: this was `ns_closure == 4` (a no-op comparison), so selecting
    # the k-omega model via useRANS == 2 silently left ns_closure at 2.
    ns_closure = 4
# Water (density kg/m^3, kinematic viscosity m^2/s)
rho_0 = 998.2
nu_0 = 1.004e-6
# Air
rho_1 = 1.205
nu_1 = 1.500e-5
# Surface tension (disabled)
sigma_01 = 0.0
# Gravity (m/s^2, acting in -z)
g = [0.0,0.0,-9.8]
# Initial condition
waterLine_x = 1.20
waterLine_z = 0.55
def signedDistance(x):
    """Signed distance from point ``x`` to the initial air/water interface.

    Negative inside the water column (x[0] < waterLine_x and x[2] < waterLine_z),
    positive outside; in the outer corner region the Euclidean distance to the
    interface corner is used.
    """
    dx = x[0] - waterLine_x
    dz = x[2] - waterLine_z
    if dx < 0.0 and dz < 0.0:
        return max(dx, dz)
    if dx < 0.0:
        return dz
    if dz < 0.0:
        return dx
    return sqrt(dx**2 + dz**2)
| StarcoderdataPython |
#compdef createmodule.py
# Zsh tab-completion for the createmodule.py script.
local arguments
arguments=(
    # -h/--help is exclusive with every other argument.
    '(- * :)'{-h,--help}'[show this help message and exit]'
    {-p,--prefix}'[specify path prefix]'
    '--noprefix[do not generate a prefix]'
    # All remaining positional arguments complete as filenames.
    '*:filename:_files'
)
_arguments -s $arguments
| StarcoderdataPython |
141725 |
import pytest
@pytest.fixture
def validator():
    """Provide the JsonValidator class under test."""
    from json_validator import JsonValidator as validator_cls
    return validator_cls
@pytest.fixture
def dumps():
    """Provide the standard-library ``json.dumps`` function."""
    import json
    return json.dumps
| StarcoderdataPython |
1662918 | <filename>pyhalo/api.py<gh_stars>1-10
__author__ = '<NAME> (@DamonLPollard)'
import requests
class HaloFour():
    """Thin REST client for the Halo 4 Waypoint web services.

    Each ``get_*`` method wraps one endpoint; requests authenticate with the
    Spartan token carried by the ``waypoint_token`` object, which must expose
    ``gamertag``, ``live_username`` and ``spartan_token``.
    """
    def __init__(self, waypoint_token):
        self.waypoint_token = waypoint_token
    def get_api_version(self):
        """Return the Waypoint web-app version info."""
        url = "https://app.halowaypoint.com/en-US/home/version"
        return self._fetch_json(url)
    def get_api_services(self):
        """Return the registered service endpoints for this client id."""
        url = "https://settings.svc.halowaypoint.com/RegisterClientService.svc" \
              "/register/webapp/AE5D20DCFA0347B1BCE0A5253D116752"
        return self._fetch_json(url)
    def get_user_achievements(self, params):
        """Return another user's achievements; ``params`` selects the user."""
        url = "https://haloplayer.svc.halowaypoint.com/HaloPlayer/GetOtherUserAchievements"
        return self._fetch_json(url, params)
    def get_playlists(self):
        url = "https://presence.svc.halowaypoint.com/en-US/h4/playlists"
        return self._fetch_json(url)
    def get_global_challenges(self):
        url = "https://stats.svc.halowaypoint.com/en-US/h4/challenges"
        return self._fetch_json(url)
    def get_player_challenges(self):
        # Challenges for the authenticated player's own gamertag.
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/challenges" % self.waypoint_token.gamertag
        return self._fetch_json(url)
    def get_game_metadata(self, params=None):
        url = "https://stats.svc.halowaypoint.com/en-US/h4/metadata"
        return self._fetch_json(url, params)
    def get_playercard(self, gamertag):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/playercard" % gamertag
        return self._fetch_json(url)
    def get_multiple_playercards(self, params):
        url = "https://stats.svc.halowaypoint.com/en-US/h4/playercards"
        return self._fetch_json(url, params)
    def get_service_record(self, gamertag):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/servicerecord" % gamertag
        return self._fetch_json(url)
    def get_game_history(self, gamertag, params=None):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/matches" % gamertag
        return self._fetch_json(url, params)
    def get_game_details(self, game_id):
        url = "https://stats.svc.halowaypoint.com/en-US/h4/matches/%s" % game_id
        return self._fetch_json(url)
    def get_commendations(self, gamertag):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/commendations" % gamertag
        return self._fetch_json(url)
    # HTTP 500
    #def get_ranks(self, gamertag):
    # url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/ranks" % gamertag
    # return self._fetch_json(url)
    # Per-mode service-record breakdowns:
    def get_campaign_details(self, gamertag):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/servicerecord/campaign" % gamertag
        return self._fetch_json(url)
    def get_spartanops_details(self, gamertag):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/servicerecord/spartanops" % gamertag
        return self._fetch_json(url)
    def get_wargame_details(self, gamertag):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/servicerecord/wargames" % gamertag
        return self._fetch_json(url)
    def get_customgame_details(self, gamertag):
        url = "https://stats.svc.halowaypoint.com/en-US/players/%s/h4/servicerecord/custom" % gamertag
        return self._fetch_json(url)
    def get_spartan_image(self, gamertag, pose, params=None):
        """Return raw PNG bytes of the player's Spartan render in ``pose``."""
        url = "https://spartans.svc.halowaypoint.com/players/%s/h4/spartans/%s" % (gamertag, pose)
        return self._fetch_png(url, params)
    def _fetch_json(self, url, params=None):
        """GET ``url`` with auth headers and return the decoded JSON body."""
        # NOTE(review): verify=False disables TLS certificate verification on
        # every request -- a security risk; confirm whether this is required.
        r = requests.get(url,
                         headers={
                             'user-agent': 'PyHalo/0.1 (%s)' % self.waypoint_token.live_username,
                             'accept': 'application/json',
                             'Accept-Encoding': 'gzip,deflate',
                             'X-343-Authorization-Spartan': self.waypoint_token.spartan_token
                         },
                         params=params,
                         verify=False
                         )
        return r.json()
    def _fetch_png(self, url, params=None):
        """GET ``url`` with auth headers and return the raw response bytes."""
        # NOTE(review): same verify=False concern as _fetch_json above.
        r = requests.get(url,
                         headers={
                             'user-agent': 'PyHalo/0.1 (%s)' % self.waypoint_token.live_username,
                             'accept': 'image/png',
                             'Accept-Encoding': 'gzip,deflate',
                             'X-343-Authorization-Spartan': self.waypoint_token.spartan_token
                         },
                         params=params,
                         verify=False
                         )
        return r.content
| StarcoderdataPython |
1711241 | <filename>cryptex/order.py
class Order(object):
    """A single exchange order.

    Holds the identifying fields of an order; subclasses distinguish the
    buy/sell side via the ``order_type`` class attribute.
    """
    # 0 = generic/unspecified side; subclasses override.
    order_type = 0
    def __init__(self, order_id, base_currency, counter_currency,
                 datetime, amount, price):
        # Assignment order matters for __str__ (dict insertion order).
        self.order_id, self.base_currency = order_id, base_currency
        self.counter_currency, self.datetime = counter_currency, datetime
        self.amount, self.price = amount, price
    def type(self):
        """Return the concrete class name (e.g. ``'BuyOrder'``)."""
        return self.__class__.__name__
    def __str__(self):
        return repr(vars(self))
class BuyOrder(Order):
    """Order on the bid (buy) side."""
    order_type = 1
class SellOrder(Order):
    """Order on the ask (sell) side."""
    order_type = 2
| StarcoderdataPython |
12835327 | import logging
import itertools
import numpy as np
from scipy.optimize import OptimizeResult, minimize_scalar
import scipy.constants
from .util import find_vertex_x_of_positive_parabola
def scalar_discrete_gap_filling_minimizer(
fun, bracket, args=(), tol=1.0, maxfev=None, maxiter=100, callback=None, verbose=False,
parabolic_method=False, golden_section_method=False, best_x_aggregator=None, **options):
"""Find a local minimum of a scalar function of a single integer variable.
The domain of the function is all integers between, and including, the bracket.
The function may have flat spots where f(a) == f(b) for a != b and this method will
attempt to search around and within the flat spots.
The function must have exactly one local minimum in the bracket.
This method maintains a left and right bracket, where the function value is greater than the best known minimum.
It also maintains a list of best x values, and the function values at all of these x values equals the best known
minimum.
At each iteration, it finds the largest gap in these x values (including the brackets) and selects
the point in the center of the largest gap.
It will then either adjust the bracket or add to the list of best x values.
The method terminates when the largest gap is less than or equal to tol.
Parameters
----------
bracket : array_like
A tuple of the bounds of the function (x_min, x_max).
Optionally, a 3-tuple can be specified and the middle point will be the initial best point.
tol : float
The method terminates when the largest gap is less than or equal to this value.
Returns
-------
OptimizeResult
The result of the minimization.
"""
# bestx is a list.
# besty is a scalar and equals f(x) for all x in bestx.
funcalls = 0
# print('parabolic_method=%s,golden_section_method=%s' % (parabolic_method,golden_section_method))
if len(bracket) == 2:
bracket_left_x = bracket[0]
bracket_right_x = bracket[1]
bestx = [np.round(np.mean([bracket_left_x, bracket_right_x]))]
a = bracket_left_x
b = bracket_right_x
if golden_section_method:
bestx = [np.round(b - (b - a) / scipy.constants.golden)]
else:
bestx = [np.round(np.mean([a, b]))]
elif len(bracket) == 3:
bracket_left_x = bracket[0]
bracket_right_x = bracket[2]
bestx = [bracket[1]]
else:
raise ValueError('Invalid bracket')
assert isinstance(bestx, list)
if not (bracket_left_x <= bestx[0] <= bracket_right_x):
raise ValueError('Invalid bracket')
if best_x_aggregator is None:
best_x_aggregator = lambda x: x[int((len(x)-1)/2)]
# Evaluate function at bestx.
besty = fun(bestx[0])
funcalls += 1
assert np.isscalar(besty)
# Evaluate function at brackets to determine if they are better than the initial bestx.
bracket_left_y = fun(bracket_left_x, *args)
bracket_right_y = fun(bracket_right_x, *args)
funcalls += 2
if bracket_left_y < besty:
bestx = [bracket_left_x]
besty = bracket_left_y
if bracket_right_y < besty:
bestx = [bracket_right_x]
besty = bracket_right_y
if verbose: logging.info('bracket=(%f,%s,%f); besty=%f' % (bracket_left_x, str(bestx), bracket_right_x, besty))
niter = 0
while niter < maxiter:
niter += 1
X = np.array([bracket_left_x] + bestx + [bracket_right_x])
Y = np.array([bracket_left_y] + [besty] * len(bestx) + [bracket_right_y])
# if verbose:
# logging.info('X=%s' % str(X))
# logging.info('Y=%s' % str(Y))
testx = None
testx_index = None
#
# Step 1: Determine the value of x to test next (testx).
#
# If we have exactly one bestx, then fit a parabola to the 3 points and test the vertex.
if parabolic_method and len(bestx) == 1:
if verbose: logging.info('Attempting parabolic method')
try:
# Attempt to fit a parabola to the 3 points and find the vertex.
testx = find_vertex_x_of_positive_parabola(X, Y)
if verbose: logging.info('Parabolic method returned testx=%f' % testx)
testx = np.round(testx)
if testx <= bracket_left_x or testx >= bracket_right_x or testx == bestx[0]:
testx = None
elif testx <= bestx[0]:
testx_index = 0
else:
testx_index = 1
except:
# This will happen if a parabola can't be fit through the 3 points.
# Ignore error and use the gap method below.
testx = None
if testx is None:
# Measure gaps in brackets and bestx and find the largest one.
if verbose: logging.info('Attempting gap method')
gaps = np.diff(X)
testx_index = np.argmax(gaps)
gapsize = gaps[testx_index]
if gapsize <= tol:
if verbose: logging.info('Achieved gap size tol')
break
# Pick a point between the largest gap.
a = X[testx_index]
b = X[testx_index + 1]
if golden_section_method:
golden_distance = (b - a) / scipy.constants.golden
if bool(np.random.randint(low=0, high=2)):
testx = np.round(b - golden_distance)
else:
testx = np.round(a + golden_distance)
else:
testx = np.round(np.mean([a, b]))
if verbose: logging.info('gapsize=%f, len(bestx)=%d, testx=%f' % (gapsize, len(bestx), testx))
assert(testx is not None)
assert(testx_index is not None)
assert(bracket_left_x <= testx <= bracket_right_x)
#
# Step 2: Evaluate function at testx.
#
testy = fun(testx, *args)
funcalls += 1
#
# Step 3: Update bracket, etc. based on function value testy at testx.
#
add_to_bestx = False
if testy < besty:
# Found a point better than all others so far.
# The new bracket will be the points to the immediate left and right of the test point.
bestx = [testx]
besty = testy
bracket_left_x = X[testx_index]
bracket_left_y = Y[testx_index]
bracket_right_x = X[testx_index + 1]
bracket_right_y = Y[testx_index + 1]
elif testy > besty:
# Point is worse than best. Reduce bracket.
if testx_index == 0:
# Test point was adjacent to left bracket.
bracket_left_x = testx
bracket_left_y = testy
elif testx_index == len(X) - 2:
# Test point was adjacent to right bracket.
bracket_right_x = testx
bracket_right_y = testy
else:
# Test point was inside the set of bestx points but is worse than besty.
# This indicates more than one local minima or a round off error.
# We will assume a round off error and handle it as if it had the same besty.
add_to_bestx = True
else:
# Point is same as best. Add it to the bestx list.
add_to_bestx = True
if add_to_bestx:
bestx = sorted(bestx + [testx])
if verbose: logging.info('bracket=(%f,%s,%f); besty=%f' % (bracket_left_x, str(bestx), bracket_right_x, besty))
if callback is not None:
callback(bestx)
if maxfev is not None and funcalls >= maxfev:
break
# Return the x that is in the median of bestx.
bestx = best_x_aggregator(np.array(bestx))
return OptimizeResult(fun=besty, x=bestx, nit=niter, nfev=funcalls, success=(niter > 1))
def multivariate_discrete_gap_filling_minimizer(
        fun, x0, bounds, args=(), tol=1.0, maxfev=None, maxiter=2, callback=None, verbose=False,
        scalar_options=None, axes=None, **options):
    """It is assumed that there is exactly one local minimum in the domain.
    For each dimension, the domain of the function consists of all integers between, and including, the bounds.
    The function may have flat spots where f(a) == f(b) for a != b and this method will
    attempt to search around and within the flat spots.
    This multivariate method uses `scalar_gap_filling_minimizer` repeatedly along each dimension
    for a fixed number of iterations. There is currently no other stopping criteria.

    Parameters
    ----------
    fun: Function of a single variable of a list-type.
    x0 : array_like
        Initial guess.
    bounds
        List-type of (min, max) pairs for each element in x, defining the bounds in that dimension.
    tol
        See `scalar_discrete_gap_filling_minimizer`.
    scalar_options : dict, optional
        Extra options forwarded to the scalar minimizer.
        (Changed from a mutable ``{}`` default to ``None`` to avoid the
        shared-mutable-default pitfall; behavior is unchanged.)
    axes : array_like
        Number of columns must equal length of x0.
        The rows will determine the set of axes that this function will optimize along.
        Leave as None to use unit axes along each dimension.

    Returns
    -------
    OptimizeResult
        The result of the minimization.
    """
    ndims = len(x0)
    bounds = np.array(bounds)
    if bounds.shape != (ndims, 2):
        raise ValueError()
    if scalar_options is None:
        scalar_options = {}
    if axes is None:
        axes = np.eye(ndims)
    else:
        # Accept any array-like (e.g. a list of lists), not only an ndarray;
        # the original crashed on `.shape` for plain sequences.
        axes = np.asarray(axes)
    if axes.shape[1] != ndims:
        raise ValueError()
    naxes = len(axes)
    if naxes <= 0:
        raise ValueError()
    bestx = x0
    besty = np.inf
    niter = 0
    funcalls = 0
    while niter < maxiter:
        niter += 1
        # One pass of coordinate (axis) descent per iteration.
        for i in range(naxes):
            axis = axes[i]
            if verbose:
                logging.info('multivariate_discrete_gap_filling_minimizer: axis %d, %s' % (i, str(axis)))
            def transform(t):
                return bestx + t*axis
            # Function of single variable (t) that we will optimize during this iteration.
            def scalar_fun(t):
                testx = transform(t)
                return fun(testx)
            # Determine bracket along optimization axis (for t).
            # All axes must remain within their respective bounds.
            bracket = np.array([-np.inf, 0.0, np.inf])
            for j in range(ndims):
                if axis[j] != 0.0:
                    btj = np.sort((bounds[j] - bestx[j]) / axis[j])
                    if bracket[0] < btj[0]:
                        bracket[0] = btj[0]
                    if bracket[2] > btj[1]:
                        bracket[2] = btj[1]
            optresult = minimize_scalar(
                scalar_fun, bracket=bracket, tol=tol, method=scalar_discrete_gap_filling_minimizer,
                options=scalar_options)
            if verbose:
                logging.info('minimize_scalar returned t=%f, y=%f' % (optresult.x, optresult.fun))
            bestx = transform(optresult.x)
            besty = optresult.fun
            if verbose:
                logging.info(
                    'multivariate_gap_filling_minimizer: niter=%d, axis=%d, best f(%s) = %f'
                    % (niter, i, str(bestx), besty))
            funcalls += optresult.nfev
            if maxfev is not None and funcalls >= maxfev:
                break
    return OptimizeResult(fun=besty, x=bestx, nit=niter, nfev=funcalls, success=(niter > 1))
def simple_global_minimizer_spark(
        fun, x0, bounds, sc=None, verbose=True, **options):
    """Exhaustive global minimizer with same calling convention as `multivariate_discrete_gap_filling_minimizer`.

    Evaluates ``fun`` at every integer grid point within ``bounds`` in
    parallel on Spark and returns the minimum (ties broken by smallest x).

    Parameters
    ----------
    fun
        Function of a single variable of a list-type.
    x0
        Unused but kept for compatibility.
    bounds
        List-type of (min, max) pairs for each element in x, defining the bounds in that dimension.
    sc (SparkContext)
        The SparkContext.

    Returns
    -------
    OptimizeResult
        The result of the minimization.
    """
    per_axis = [range(lo, hi + 1) for lo, hi in bounds]
    n_points = np.product([len(r) for r in per_axis])
    if verbose:
        logging.info('simple_global_minimizer_spark: domain_size=%d' % n_points)
    candidates = sc.parallelize(itertools.product(*per_axis))
    # Evaluate function at each point in parallel using Spark.
    scored = candidates.map(lambda x: (fun(x), x))

    def sort_key(yx):
        # fun may return a tuple whose first element is the objective value;
        # secondary sort on x breaks ties deterministically.
        y, x = yx
        return (y[0], x) if isinstance(y, tuple) else (y, x)

    best_y, best_x = scored.min(sort_key)
    return OptimizeResult(fun=best_y, x=best_x, nfev=n_points, success=True)
| StarcoderdataPython |
11251912 | <gh_stars>100-1000
class Frame(BaseSheet):
'''Maintains the data as records.
'''
    def __init__(self, frame=None, columns=None, nan=None):
        """Initialize a row-oriented Frame; records are stored as a list of lists.

        Parsing of ``frame`` is delegated to BaseSheet.__init__, which
        presumably dispatches to one of the _init_* helpers below -- confirm.
        """
        self._data = []
        BaseSheet.__init__(self, frame, columns, nan)
    @property
    def info(self):
        """Print a four-line structural summary of the frame (returns None).

        NOTE(review): relies on Python 2 semantics -- ``xrange`` and indexing
        the result of ``map()`` -- so this breaks under Python 3.
        """
        new_m_v = map(str, self._missing)
        # Width of the longest column title, used to center the headers.
        max_n = len(max(self._columns, key=len))
        info = ''
        for i in xrange(self._dim.Col):
            info += ' ' * 15
            info += self._columns[i].center(max_n) + '| '
            info += ' ' + new_m_v[i] + '\n'
        print('1. Structure: DaPy.Frame\n' +
              '2. Dimensions: Ln=%d | Col=%d\n' % self._dim +
              '3. Miss Value: %d elements\n' % sum(self._missing) +
              '4. Columns: ' + 'Title'.center(max_n) + '|' +
              ' Miss\n' + info)
    @property
    def T(self):
        # Transposed copy built from iter_values(); presumably rows become
        # columns (mirroring numpy/pandas .T) -- confirm iter_values() order.
        return Frame(self.iter_values(), None, self.nan)
    def _init_col(self, obj, columns):
        """Initialize from a column-oriented sheet object.

        ``obj`` must expose ``_columns``, ``_missing``, ``_dim`` and
        ``values()``; records are rebuilt by transposing its columns.
        """
        if columns is None:
            columns = copy(obj._columns)
        self._data = [list(record) for record in zip(*list(obj.values()))]
        self._missing = copy(obj._missing)
        self._dim = SHEET_DIM(obj._dim.Ln, obj._dim.Col)
        self._init_col_name(columns)
def _init_frame(self, frame, columns):
if columns is None:
columns = copy(obj._columns)
self._data = deepcopy(frame._data)
self._dim = copy(frame._dim)
self._init_col_name(columns)
self._missing = copy(frame._missing)
def _init_dict(self, frame, columns):
if columns is None:
columns = list(obj.keys())
frame = copy(frame)
self._dim = SHEET_DIM(max(map(len, frame.values())), len(frame))
self._missing = [0] * self._dim.Col
self._init_col_name(columns)
for i, (title, col) in enumerate(frame.items()):
miss, sequence = self._check_sequence(col, self._dim.Ln)
frame[title] = sequence
self._missing[i] += miss
self._data = [list(record) for record in zip(*frame.values())]
def _init_like_table(self, frame, columns):
self._data = map(list, frame)
dim_Col, dim_Ln = len(max(self._data, key=len)), len(frame)
self._dim = SHEET_DIM(dim_Ln, dim_Col)
self._missing = [0] * self._dim.Col
for i, item in enumerate(self._data):
if len(item) < dim_Col:
item.extend([self._nan] * (dim_Col - len(item)))
for j, value in enumerate(item):
if value == self.nan or value is self.nan:
self._missing[j] = self._missing[j] + 1
self._init_col_name(columns)
def _init_like_seq(self, frame, columns):
self._data = [[value, ] for value in frame]
self._dim = SHEET_DIM(len(frame), 1)
self._init_col_name(columns)
self._missing.append(self._check_sequence(frame, len(frame))[0])
def __repr__(self):
return self.show(30)
def _getslice_col(self, i, j):
new_data = [record[i: j + 1] for record in self._data]
return Frame(new_data, self._columns[i: j + 1], self._nan)
def _getslice_ln(self, i, j, k):
return Frame(self._data[i:j:k], self._columns, self._nan)
def __getitem__(self, interval):
if isinstance(interval, int):
return Row(self, interval)
elif isinstance(interval, slice):
return self.__getslice__(interval)
elif is_str(interval):
col = self._columns.index(interval)
return [item[col] for item in self._data]
elif isinstance(interval, (tuple, list)):
return_obj = Frame()
return self._getitem_by_tuple(interval, return_obj)
else:
raise TypeError('item must be represented as slice, int, str.')
def __iter__(self):
for i in xrange(self._dim.Ln):
yield Row(self, i)
def append_row(self, item):
'''append a new record to the Frame tail
'''
item = self._add_row(item)
self._data.append(item)
def append_col(self, series, variable_name=None):
'''append a new variable to the current records tail
'''
miss, series = self._check_sequence(series, self._dim.Ln)
size = len(series) - self._dim.Ln
if size > 0:
self._missing = [m + size for m in self._missing]
self._data.extend(
[[self._nan] * self._dim.Col for i in xrange(size)])
self._missing.append(miss)
for record, element in zip(self._data, series):
record.append(element)
self._columns.append(self._check_col_new_name(variable_name))
self._dim = SHEET_DIM(max(self._dim.Ln, len(series)), self._dim.Col + 1)
assert len(self._missing) == self._dim.Col == len(self.columns)
def count(self, X, point1=None, point2=None):
if is_value(X):
X = (X,)
counter = Counter()
L1, C1, L2, C2 = self._check_area(point1, point2)
for record in self._data[L1:L2 + 1]:
for value in record[C1:C2 + 1]:
if value in X:
counter[value] += 1
if len(X) == 1:
return counter[X[0]]
return dict(counter)
def extend(self, other, inplace=False):
if isinstance(other, Frame):
if inplace is False:
self = SeriesSet(Frame)
new_title = 0
for title in other._columns:
if title not in self._columns:
self._columns.append(title)
new_title += 1
for record in self._data:
record.extend([self._nan] * new_title)
extend_part = [[self._nan] * len(self._columns)
for i in xrange(len(other))]
new_title_index = [self._columns.index(title)
for title in other._columns]
self._dim = SHEET_DIM(len(self) + len(other), len(self._columns))
self._missing.extend([self._dim.Ln] * new_title)
for i, record in enumerate(other._data):
for j, value in zip(new_title_index, record):
if value == other._nan:
value = self._nan
extend_part[i][j] = value
self._data.extend(extend_part)
return self
elif isinstance(other, SeriesSet):
return self.extend(Frame(other), inplace)
else:
return self.extend(Frame(other, self._columns), inplace)
def join(self, other, inplace=False):
if isinstance(other, Frame):
if inplace is False:
self = Frame(self)
for title in other._columns:
self._columns.append(self._check_col_new_name(title))
self._missing.extend(other._missing)
for i, record in enumerate(other._data):
if i < self._dim.Ln:
current_record = self._data[i]
else:
current_record = [self._nan] * self._dim.Col
self._data.append(current_record)
for value in record:
if value == other.nan:
value = self._nan
current_record.append(value)
if i < self._dim.Ln:
for record in self._data[i + 1:]:
record.extend([self._nan] * other.shape.Col)
self._dim = SHEET_DIM(len(self._data), len(self._columns))
return self
else:
self.join(Frame(other, nan=self.nan), inplace)
def insert_row(self, index, item):
'''insert a new record to the frame with position `index`
'''
item = self._add_row(item)
self._data.insert(index, item)
def insert_col(self, index, series, variable_name=None):
'''insert a new variable to the current records in position `index`
'''
miss, series = self._check_sequence(series)
size = len(series) - self._dim.Ln
if size > 0:
for i in xrange(self._dim.Col):
self._missing[i] += size
self._data.extend([[self._nan] * self._dim.Col
for i in xrange(size)])
self._missing.insert(index, miss)
for i, element in enumerate(series):
self._data[i].insert(index, element)
self._columns.insert(index, self._check_col_new_name(variable_name))
self._dim = SHEET_DIM(max(self._dim.Ln, size), self._dim.Col + 1)
def items(self):
for i, sequence in enumerate(zip(*self._data)):
yield self._columns[i], list(sequence)
def keys(self):
return self._columns
def pop_row(self, pos=-1):
'''pop(remove & return) a record from the Frame
'''
err = 'an int or ints in list is required.'
assert isinstance(pos, (int, list, tuple)), err
if isinstance(pos, int):
pos = [pos, ]
pos = sorted(pos, reverse=True)
pop_item = Frame([self._data.pop(pos_)
for pos_ in pos], list(self._columns))
self._dim = SHEET_DIM(self._dim.Ln - len(pos), self._dim.Col)
self._missing = map(
lambda x, y: x - y,
self._missing,
pop_item._missing)
return pop_item
def from_file(self, addr, **kwrd):
'''read dataset from csv or txt file.
'''
raise NotImplementedError('use DaPy.SeriesSet.from_file()')
def reverse(self):
self._data.reverse()
def shuffle(self):
shuffles(self._data)
def _values(self):
for sequence in zip(*self._data._data):
yield list(sequence)
def values(self):
for sequence in zip(*self._data):
yield Series(sequence)
def pop_col(self, pos=-1):
'''pop(remove & return) a series from the Frame
'''
pop_name = self._check_columns_index(pos)
for name in pop_name:
index = self._columns.index(name)
self._columns.pop(index)
self._missing.pop(index)
pop_data = [[] for i in xrange(len(pop_name))]
new_data = [0] * self._dim.Ln
for j, record in enumerate(self._data):
line = []
for i, value in enumerate(record):
if i in pop_name:
pop_data[pop_name.index(i)].append(value)
else:
line.append(value)
new_data[j] = line
self._dim = SHEET_DIM(self._dim.Ln, self._dim.Col - len(pos))
self._data = new_data
return SeriesSet(dict(zip(pop_name, pop_data)))
def dropna(self, axis='LINE'):
'''pop all records that maintains miss value while axis is `LINE` or
pop all variables that maintains miss value while axis is `COL`
'''
pops = []
if str(axis).upper() in ('0', 'LINE'):
for i, record in enumerate(self._data):
if self._nan in record:
pops.append(i)
if str(axis).upper() in ('1', 'COL'):
for i, sequence in enumerate(zip(*self._data)):
if self._nan in sequence:
pops.append(self._columns[i])
if len(pops) != 0:
self.__delitem__(pops)
| StarcoderdataPython |
1807779 | <reponame>MakeSenseCorp/nodes-v3<filename>2022/classes/StockMarketRemote.py
#!/usr/bin/python
import os
import sys
import signal
import json
import time
import _thread
import threading
import base64
import datetime
from datetime import date
import queue
import math
from classes import StockMarketAPI
from classes import AlgoMath
from classes import Algos
class StockCalculation():
    """Prediction/statistics helpers for a single stock record.

    ``CalculateBasicPrediction`` turns the high/low output of
    ``Algos.BasicPrediction`` into a buy/sell/hold action stored on the
    stock dict; ``GetBasicStatistics`` computes variance, stdev and a
    regression line over a price series via ``AlgoMath``.
    """

    def __init__(self):
        self.ClassName = "StockCalculation"
        # Maps a history period name to its slot in the per-stock
        # ``statistics["basic"]`` / ``predictions["basic"]`` arrays.
        self.BasicPredictionPeriodToIndexMap = {
            "1D": 0,
            "5D": 1,
            "1MO": 2,
            "3MO": 3,
            "6MO": 4,
            "1Y": 5
        }
        # Algo
        self.BasicPrediction = Algos.BasicPrediction()
        self.Math = AlgoMath.AlgoMath()
        # Fired as (ticker, slot_index) whenever a prediction action flips.
        self.StockSimplePredictionChangeCallback = None

    def _UpdateAction(self, stock, index, action):
        """Store ``action`` ("sell"/"buy"/"hold") in prediction slot ``index``.

        Replaces three copy-pasted branches of the original code: keeps
        the previous action, logs the flip and fires
        ``StockSimplePredictionChangeCallback`` when the action changes.
        """
        slot = stock["predictions"]["basic"][index]["action"]
        if "none" in slot["current"]:
            # First decision for this slot - set silently.
            slot["current"] = action
        elif action not in slot["current"]:
            print("({classname})# [CalculateBasicPrediction] ({0}) Prediction changed to {action} {1}".format(stock["ticker"],stock["price"],action=action.upper(),classname=self.ClassName))
            slot["previouse"] = slot["current"]
            slot["current"] = action
            # Call for update callback
            if self.StockSimplePredictionChangeCallback is not None:
                self.StockSimplePredictionChangeCallback(stock["ticker"], index)

    def CalculateBasicPrediction(self, stock, period):
        """Run the basic prediction for ``period`` and update ``stock``.

        Price above the predicted high -> sell, below the predicted
        low -> buy, otherwise hold.  Returns ``stock`` (``None`` on the
        early bail-outs, matching the original behaviour).
        """
        if stock["price"] <= 0:
            print("({classname})# [CalculateBasicPrediction] ({0}) Error: Price not valid for prediction ({1})".format(stock["ticker"],stock["price"],classname=self.ClassName))
            return
        stock_open = []
        for item in stock[period]:
            stock_open.append(item["open"])
        try:
            self.BasicPrediction.SetBuffer(stock_open)
            error, output = self.BasicPrediction.Execute()
            if error == -1:
                print("OPEN", stock["ticker"], item["open"])
                return
            high = output["output"]["index_high"]
            low = output["output"]["index_low"]
            x = output["output"]["x"]
            index = self.BasicPredictionPeriodToIndexMap[period]
            if x[high] < stock["price"]:
                self._UpdateAction(stock, index, "sell")
            elif x[low] > stock["price"]:
                self._UpdateAction(stock, index, "buy")
            else:
                self._UpdateAction(stock, index, "hold")
        except Exception as e:
            print("({classname})# [EXCEPTION] (CalculateBasicPrediction) {0} {1}".format(stock["ticker"],str(e),classname=self.ClassName))
        return stock

    def GetBasicStatistics(self, data):
        """Return {"var", "std", "regression": {slope, offset, r_value}}
        computed over the price sequence ``data``."""
        regression_line = []
        close_line = []
        for idx, sample in enumerate(data):
            regression_line.append({
                "y": sample,
                "x": idx
            })
            close_line.append(sample)
        var = self.Math.Variance(close_line)
        std = self.Math.Stdev(close_line)
        slope, b = self.Math.CalculateRegression(regression_line)
        r2 = self.Math.RValue(regression_line, slope, b)
        return {
            "var": var,
            "std": std,
            "regression": {
                "slope": slope,
                "offset": b,
                "r_value": r2
            }
        }
class StockMarket():
    """Background stock watcher.

    A master thread (``StockMonitorWorker``) walks over ``CacheDB`` and
    hands refresh jobs to a pool of minion threads (``StockMinion``)
    through per-minion queues.  Minions refresh price/history data,
    recompute predictions/statistics and fire the registered callbacks.
    """

    def __init__(self):
        self.ClassName = "StockMarket"
        self.CacheDB = {}                       # ticker -> stock record (see GenerateEmtpyStock)
        self.WorkerRunning = False
        self.Locker = threading.Lock()          # guards CacheDB mutations
        self.FirstStockUpdateRun = False        # True once every stock was updated at least once
        self.MarketOpen = False
        self.MarketPollingInterval = 1
        self.Logger = None
        self.Halt = False
        self.Algos = StockCalculation()
        self.API = StockMarketAPI.API()
        # Callbacks
        self.FullLoopPerformedCallback = None
        self.StockChangeCallback = None
        self.ThresholdEventCallback = None
        self.FirstRunDoneCallback = None
        self.StockMarketOpenCallback = None
        self.StockMarketCloseCallback = None
        self.StockSimplePredictionChangeCallback = None
        # Threading section
        self.ThreadCount = 10
        self.Signal = threading.Event()         # master <-> minion hand-shake
        self.Queues = []                        # one job queue per minion
        self.ThreadPool = []
        self.ThreadPoolStatus = []              # True while minion idx is busy
        self.ThreadPoolLocker = threading.Lock()
        self.JoblessMinions = 0
        # Init thread minion queues
        self.Signal.set()
        for idx in range(self.ThreadCount):
            self.Queues.append(queue.Queue())

    def SetLogger(self, logger):
        self.Logger = logger

    def LogMSG(self, message, level):
        # Falls back to stdout when no logger was attached.
        if self.Logger is not None:
            self.Logger.Log(message, level)
        else:
            print("({classname})# [NONE LOGGER] - {0}".format(message,classname=self.ClassName))

    def Start(self):
        """Launch the master monitor thread."""
        self.WorkerRunning = True
        self.LogMSG("({classname})# Start".format(classname=self.ClassName), 5)
        _thread.start_new_thread(self.StockMonitorWorker, ())

    def Stop(self):
        """Signal all worker threads to exit their loops."""
        self.WorkerRunning = False
        self.LogMSG("({classname})# Stop".format(classname=self.ClassName), 5)

    def IsMarketOpen(self):
        """True between 16:25 and 23:05 local time."""
        currTime = datetime.datetime.now().time()
        return (currTime > datetime.time(16,25) and currTime < datetime.time(23,5))

    def WaitForMinionsToFinish(self):
        """Block until no minion is marked busy."""
        self.LogMSG("({classname})# [WaitForMinionsToFinish]".format(classname=self.ClassName), 5)
        wait_for_all_minions = True
        while wait_for_all_minions is True:
            wait_for_all_minions = False
            for item in self.ThreadPoolStatus:
                if item is True:
                    wait_for_all_minions = True
                    break
            time.sleep(0.5)
        return

    def StockUpdated(self):
        """True when every cached stock has been updated successfully."""
        self.LogMSG("({classname})# [StockUpdated]".format(classname=self.ClassName), 5)
        for ticker in self.CacheDB:
            stock = self.CacheDB[ticker]
            if stock["updated"] is False:
                return False
        return True

    def NeedUpdate(self, stock):
        """Decide whether a stock is due for refresh.

        Refresh frequency scales with daily volume: very liquid stocks
        on every pass, others every 30/60/90 seconds.
        """
        if stock is None:
            return False
        try:
            ts = time.time()
            if stock["updated"] is True:
                vol = stock["1D"][0]["vol"]
                if vol > 1000000:
                    return True
                elif vol > 500000:
                    if ts - stock["ts_last_updated"] > 30.0:
                        return True
                elif vol > 100000:
                    if ts - stock["ts_last_updated"] > 60.0:
                        return True
                else:
                    if ts - stock["ts_last_updated"] > 90.0:
                        return True
            else:
                return True
        except Exception as e:
            self.LogMSG("({classname})# [Exeption] NeedUpdate ({0})".format(e,classname=self.ClassName), 5)
        return False

    def GetPriceListFromStockPeriod(self, data, p_type):
        """Extract the ``p_type`` field (e.g. "close") from a history list."""
        prices = []
        if data is not None:
            for item in data:
                prices.append(item[p_type])
        return prices

    def StockMinion(self, index):
        """Worker thread: blocks on its queue and refreshes one stock per job."""
        self.LogMSG("({classname})# [MINION] Reporting for duty ({0})".format(index,classname=self.ClassName), 5)
        # Update jobless minons
        self.ThreadPoolLocker.acquire()
        self.JoblessMinions += 1
        self.ThreadPoolLocker.release()
        Interval = 0.5
        Itterations = 0
        ItterationFactor = 1
        algos = StockCalculation()
        algos.StockSimplePredictionChangeCallback = self.StockSimplePredictionChangeCallback
        stock_api = StockMarketAPI.API()
        while self.WorkerRunning is True:
            # BUGFIX (#1): ``stock`` is defined before the try block so the
            # except handler no longer raises "name 'stock' is not defined".
            stock = None
            try:
                item = self.Queues[index].get(block=True,timeout=None)
                # Update pool thread status
                self.ThreadPoolStatus[index] = True
                # Initiate working variables
                error = False
                ticker = item["ticker"]
                stock = self.CacheDB[ticker]
                # Print working message
                self.LogMSG("({classname})# [MINION] Update stock ({0}) ({1}) ({2})".format(index,ticker,Itterations,classname=self.ClassName), 5)
                # Update local stock DB
                if stock is not None:
                    error, stock["price"] = stock_api.GetStockCurrentPrice(ticker) # Get stock price
                    if error is True:
                        stock["price"] = None
                    else:
                        #
                        # ---- Get History, Calculate Basic Prediction and Get Basic Statistics
                        #
                        # Each period is refreshed every (ItterationFactor *
                        # factor) iterations; the tuple also carries the
                        # slot index in statistics["basic"].
                        history_plan = [
                            ("1D",  1,  stock_api.Get1D,  0),
                            ("5D",  2,  stock_api.Get5D,  1),
                            ("1MO", 4,  stock_api.Get1MO, 2),
                            ("3MO", 16, stock_api.Get3MO, 3),
                            ("6MO", 32, stock_api.Get6MO, 4),
                            ("1Y",  64, stock_api.Get1Y,  5),
                        ]
                        for period, factor, fetcher, slot in history_plan:
                            if Itterations % (ItterationFactor * factor) == 0 or stock[period] is None:
                                error, stock[period] = fetcher(ticker)
                                if error is True:
                                    stock[period] = None
                                else:
                                    algos.CalculateBasicPrediction(stock, period)
                                    stock_prices = self.GetPriceListFromStockPeriod(stock[period], "close")
                                    stock["statistics"]["basic"][slot] = algos.GetBasicStatistics(stock_prices)
                        #
                        # ---- Get Price Difference
                        #
                        # Calculate price difference between today and previouse day
                        if stock["1D"] is not None and stock["5D"] is not None:
                            today_open = stock["1D"][0]
                            for idx, item in enumerate(stock["5D"]):
                                if item["date"] == today_open["date"]:
                                    stock["prev_market_price"] = stock["5D"][idx-1]["close"]
                        #
                        # ---- Stock Thresholds
                        #
                        # Check for thresholds
                        for threshold in stock["thresholds"]:
                            threshold["activated"] = False
                            if threshold["type"] == 1: # UPPER
                                if float(threshold["value"]) > float(stock["price"]):
                                    threshold["activated"] = True
                                else:
                                    threshold["last_emit_ts"] = 0
                            elif threshold["type"] == 2: # EQUAL
                                if float(threshold["value"]) == float(stock["price"]):
                                    threshold["activated"] = True
                                else:
                                    pass # Need to deside what to do here.
                            elif threshold["type"] == 3: # LOWER
                                if float(threshold["value"]) < float(stock["price"]):
                                    threshold["activated"] = True
                                else:
                                    threshold["last_emit_ts"] = 0
                            else:
                                pass
                            # Threshold reached its value
                            if threshold["activated"] is True:
                                print("THRESHOLD", float(threshold["value"]), float(stock["price"]), threshold["emit_counter"], int(threshold["last_emit_ts"]))
                                # Re-emit at most once every 30 minutes.
                                if time.time() - int(threshold["last_emit_ts"]) > 60 * 30:
                                    threshold["emit_counter"] += 1
                                    threshold["last_emit_ts"] = time.time()
                                    #
                                    # ---- Event Emitted (THRESHOLD)
                                    #
                                    if self.ThresholdEventCallback is not None:
                                        self.ThresholdEventCallback(ticker, stock["price"], threshold)
                        #
                        # ---- Other
                        #
                        if error is True:
                            # Stock was not updated correctly
                            stock["updated"] = False
                        else:
                            # Update stock status to updated and update timestamp
                            stock["updated"] = True
                            stock["ts_last_updated"] = time.time()
                # Free to accept new job
                self.ThreadPoolStatus[index] = False
                # Signal master in case he waits on signal
                self.Signal.set()
            except Exception as e:
                self.LogMSG("({classname})# [EXCEPTION] MINION {0} {1}".format(index,str(e),classname=self.ClassName), 5)
                if stock is not None:
                    stock["updated"] = False
                self.ThreadPoolStatus[index] = False
                self.Signal.set()
            # Wait several MS
            time.sleep(Interval)
            # Next itteration
            Itterations += 1

    def StockMonitorWorker(self):
        """Master thread: spawns the minion pool and dispatches update jobs."""
        self.MarketPollingInterval = 0.5
        # Start your minions
        for idx in range(self.ThreadCount):
            self.ThreadPoolStatus.append(False)
            self.LogMSG("({classname})# [MASTER] Minion ({0}) report for duty".format(idx,classname=self.ClassName), 5)
            self.ThreadPool.append(_thread.start_new_thread(self.StockMinion, (idx,)))
        # Wait untill minions will report for duty
        while self.JoblessMinions < self.ThreadCount:
            time.sleep(1)
        self.MarketPollingInterval = 10
        d_ticker = ""
        while self.WorkerRunning is True:
            try:
                # NOTE(review): the market-hours check is disabled; the loop
                # currently always treats the market as open - confirm intent.
                # self.MarketOpen = self.IsMarketOpen()
                self.MarketOpen = True
                if self.Halt is False:
                    if self.MarketOpen is True or self.FirstStockUpdateRun is False:
                        # Itterate over all user stocks
                        for ticker in self.CacheDB:
                            if self.Halt is True:
                                # Get out
                                break
                            stock = self.CacheDB[ticker]
                            d_ticker = ticker
                            # Check if stock is not null
                            if stock is not None:
                                # Check if stock need to be updated
                                if self.NeedUpdate(stock) is True:
                                    stock["updated"] = False
                                    # Find free queue
                                    jobless_minion_found = False
                                    while jobless_minion_found is False:
                                        for idx, item in enumerate(self.ThreadPoolStatus):
                                            if item is False:
                                                # Send job to minion
                                                self.Queues[idx].put({
                                                    "ticker": ticker
                                                })
                                                time.sleep(0.1)
                                                jobless_minion_found = True
                                                break
                                        if jobless_minion_found is False:
                                            self.Signal.clear()
                                            # free minion not found, wait
                                            self.Signal.wait()
                            else:
                                self.LogMSG("({classname})# [Exception] MASTER Stock {0} is null".format(ticker,classname=self.ClassName), 5)
                        # Rest little bit (we are not realtime stock monitor)
                        time.sleep(0.5)
                        if self.FirstStockUpdateRun is False:
                            self.WaitForMinionsToFinish()
                            if self.FirstRunDoneCallback is not None:
                                self.FirstRunDoneCallback()
                if self.FirstStockUpdateRun is False:
                    if self.StockUpdated() is True:
                        self.FirstStockUpdateRun = True
                time.sleep(self.MarketPollingInterval)
            except Exception as e:
                self.LogMSG("({classname})# [Exeption] MASTER ({0}) ({1})".format(d_ticker,e,classname=self.ClassName), 5)

    def PauseMarket(self):
        """Temporarily stop dispatching update jobs."""
        self.LogMSG("({classname})# [PauseMarket]".format(classname=self.ClassName), 5)
        self.Halt = True

    def ContinueMarket(self):
        """Resume dispatching after PauseMarket()."""
        self.LogMSG("({classname})# [ContinueMarket]".format(classname=self.ClassName), 5)
        self.Halt = False

    def UpdateStocks(self):
        """Force a full refresh cycle for every cached stock."""
        self.FirstStockUpdateRun = False

    # ---- THRESHOLDS ----
    def GetRunTimeThresholds(self, ticker):
        """Return the live threshold list for ``ticker`` (None if unknown)."""
        with self.Locker:
            stock = self.CacheDB.get(ticker)
        if stock is not None:
            return stock["thresholds"]
        return None

    def RemoveThreshold(self, ticker, threshold_id):
        """Delete the threshold whose ``id`` equals ``threshold_id``."""
        with self.Locker:
            try:
                stock = self.CacheDB[ticker]
                threshold = None
                for idx, item in enumerate(stock["thresholds"]):
                    if item["id"] == threshold_id:
                        threshold = idx
                        break
                if threshold is not None:
                    del stock["thresholds"][threshold]
            except Exception:
                pass

    def RemoveThresholdByStockActionId(self, ticker, act_id):
        """Delete the threshold bound to stock action ``act_id``."""
        with self.Locker:
            try:
                stock = self.CacheDB[ticker]
                threshold_idx = -1
                thresholds = stock["thresholds"]
                for idx, item in enumerate(thresholds):
                    if item["stock_action_id"] == act_id:
                        threshold_idx = idx
                        break
                if threshold_idx != -1:
                    del thresholds[threshold_idx]
            except Exception:
                pass

    def AppendThreshold(self, ticker, threshold):
        """Attach ``threshold`` to ``ticker`` unless a same-named one exists."""
        with self.Locker:
            try:
                stock = self.CacheDB[ticker]
                # Check if threshold exist
                for item in stock["thresholds"]:
                    if item["name"] == threshold["name"]:
                        return
                # Append
                threshold["id"] = time.time()
                threshold["last_emit_ts"] = 0
                threshold["emit_counter"] = 0
                stock["thresholds"].append(threshold)
            except Exception as e:
                self.LogMSG("({classname})# [EXCEPTION] AppendThreshold {0}".format(str(e),classname=self.ClassName), 5)
    # ---- THRESHOLDS ----

    @staticmethod
    def _EmptyBasicStatistics():
        """One zeroed statistics slot (one per history period)."""
        return {
            "std": 0.0,
            "var": 0.0,
            "regression": {
                "slope": 0.0,
                "offset": 0.0,
                "r_value": 0.0
            }
        }

    @staticmethod
    def _EmptyBasicPrediction():
        """One zeroed prediction slot (one per history period)."""
        return {
            "action": {
                "current": "none",
                "previouse": "none"  # (sic) key spelling is used across the code base
            },
            "high": 0.0,
            "middle": 0.0,
            "low": 0.0,
            "action_flags": [0, 0, 0]
        }

    def GenerateEmtpyStock(self):
        """Return a fresh stock record.

        (Method name typo is kept - existing callers depend on it.)
        The six statistics/prediction slots map to the periods
        1D, 5D, 1MO, 3MO, 6MO, 1Y in that order.
        """
        return {
            "ticker": "",
            "price": 0,
            "1D": None,
            "5D": None,
            "1MO": None,
            "3MO": None,
            "6MO": None,
            "1Y": None,
            "updated": False,
            "pulled": False,
            "ts_last_updated": 0,
            "thresholds": [],
            "statistics": {
                "basic": [self._EmptyBasicStatistics() for _ in range(6)]
            },
            "predictions": {
                "basic": [self._EmptyBasicPrediction() for _ in range(6)]
            }
        }

    def AppendStock(self, ticker):
        """Register a new empty stock record and force a refresh cycle."""
        with self.Locker:
            stock = self.GenerateEmtpyStock()
            stock["ticker"] = ticker
            self.CacheDB[stock["ticker"]] = stock
            self.FirstStockUpdateRun = False

    def GetMarketStatus(self):
        """Report whether the first full update pass has completed."""
        res = {
            "local_stock_market_ready": self.FirstStockUpdateRun
        }
        return res

    def GetCacheDB(self):
        """Return the raw ticker -> stock cache (not a copy)."""
        return self.CacheDB

    def GetStockInformation(self, ticker):
        """Return the cached record for ``ticker`` or None."""
        with self.Locker:
            return self.CacheDB.get(ticker)

    def RemoveStock(self, ticker):
        """Drop ``ticker`` from the cache if present."""
        with self.Locker:
            self.CacheDB.pop(ticker, None)
| StarcoderdataPython |
# Build a speaker -> class lookup from the semicolon-separated mapping file.
# Files are opened with context managers so they are closed even when a
# malformed line raises (the original leaked handles on error).
speakers = {}
with open("speakers_class.csv", mode="r", encoding="utf-8") as f:
    lines = f.read().split("\n")
# lines[0] is the header; the file ends with a newline, so the last split
# element is an empty string -- skip both.
for line in lines[1:-1]:
    spk, id_class = line.split(";")
    speakers[spk] = id_class

# Re-emit the turns file with the speaker class appended as a last column.
with open("turns_all_info.csv", mode="r", encoding="utf-8") as f, \
     open("turns_all_info_id_class.csv", mode="w", encoding="utf-8") as fo:
    fo.write("id_turn;start_time;end_time;id_speaker;gender;id_episode;id_show;id_corpus;id_speaker_class\n")
    content = f.read()
    lines = content.split("\n")
    for line in lines[1:-1]:
        # Unpacking all 8 fields keeps the original strict field-count check.
        id_turn, start_time, end_time, id_speaker, gender, id_episode, id_show, id_corpus = line.split(";")
        fo.write(line + ";" + speakers[id_speaker] + "\n")
import math


def euclidean_distance(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2)."""
    # math.hypot is numerically safer than sqrt of a sum of squares.
    return math.hypot(x2 - x1, y2 - y1)


if __name__ == "__main__":
    # Prompts and read order (x1, x2, y1, y2) kept from the original script;
    # the __main__ guard makes the module importable (and testable) without
    # triggering interactive input.
    x1 = int(input("Enter the Value: "))
    x2 = int(input("Enter the Value: "))
    y1 = int(input("Enter the Value: "))
    y2 = int(input("Enter the Value: "))
    print(round(euclidean_distance(x1, y1, x2, y2), 4))
1747828 | import os
import pathlib
import pickle
import json
def subdirs(folder, join=True, prefix=None, suffix=None, sort=True):
    """List the immediate sub-directories of *folder*.

    Args:
        folder: directory to scan.
        join: if True return full paths, otherwise bare entry names.
        prefix / suffix: when given, keep only names that start / end with them.
        sort: return entries alphabetically sorted.
    """
    # The original assigned ``l = lambda x, y: y`` (E731, ambiguous name);
    # a conditional expression expresses the same choice directly.
    res = [os.path.join(folder, i) if join else i
           for i in os.listdir(folder)
           if os.path.isdir(os.path.join(folder, i))
           and (prefix is None or i.startswith(prefix))
           and (suffix is None or i.endswith(suffix))]
    if sort:
        res.sort()
    return res
def subfiles(folder, join=True, prefix=None, suffix=None, sort=True):
    """List the regular files directly inside *folder*.

    Args:
        folder: directory to scan.
        join: if True return full paths, otherwise bare entry names.
        prefix / suffix: when given, keep only names that start / end with them.
        sort: return entries alphabetically sorted.
    """
    # The original assigned ``l = lambda x, y: y`` (E731, ambiguous name);
    # a conditional expression expresses the same choice directly.
    res = [os.path.join(folder, i) if join else i
           for i in os.listdir(folder)
           if os.path.isfile(os.path.join(folder, i))
           and (prefix is None or i.startswith(prefix))
           and (suffix is None or i.endswith(suffix))]
    if sort:
        res.sort()
    return res
subfolders = subdirs # I am tired of confusing those
def maybe_mkdir_p(directory):
    """Create *directory* (including parents); existing directories are fine."""
    target = pathlib.Path(directory)
    target.mkdir(parents=True, exist_ok=True)
def load_pickle(file, mode='rb'):
    """Unpickle and return the object stored in *file*."""
    with open(file, mode) as handle:
        return pickle.load(handle)
def write_pickle(obj, file, mode='wb'):
    """Serialize *obj* into *file* with pickle."""
    with open(file, mode) as handle:
        pickle.dump(obj, handle)
save_pickle = write_pickle
def load_json(file):
    """Parse the JSON document stored in *file* and return it."""
    with open(file, 'r') as handle:
        return json.load(handle)
def save_json(obj, file, indent=4, sort_keys=True):
    """Serialize *obj* as (by default pretty-printed, key-sorted) JSON into *file*."""
    with open(file, 'w') as handle:
        json.dump(obj, handle, sort_keys=sort_keys, indent=indent)
write_json = save_json
def pardir(path):
    """Return *path* joined with the platform parent-directory marker ('..')."""
    return os.path.join(path, os.pardir)
# Short aliases for frequently used ``os`` / ``os.path`` helpers.
join = os.path.join
isdir = os.path.isdir
isfile = os.path.isfile
listdir = os.listdir
| StarcoderdataPython |
6437414 | # Copyright 2020 Lorna Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import glob
import multiprocessing as mp
import os
import random
import time
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from torch.utils.data import DataLoader
from tqdm import tqdm
from easydet.config import get_cfg
from model.network.yolov3_tiny import YOLOv3Tiny
from model.network.yolov3 import YOLOv3
from test import evaluate
from utils import CosineDecayLR
from utils import VocDataset
from utils import init_seeds
from utils import select_device
from utils.loss import YoloV3Loss
from utils.process_darknet_weights import load_darknet_weights
# NOTE(review): the flag starts as False and the except branch also sets
# False, so mixed precision stays disabled even when apex imports cleanly.
# Upstream code initializes this to True before the import -- confirm intent.
mixed_precision = False
try:  # Mixed precision training https://github.com/NVIDIA/apex
    from apex import amp
except:
    mixed_precision = False  # apex not installed (or failed to import)
def setup_cfg(args):
    """Build and freeze the training config from the YAML file plus CLI overrides."""
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    # Batch sizes: the effective batch is four mini-batches (gradient accumulation).
    cfg.TRAIN.MINI_BATCH_SIZE = args.batch_size
    cfg.TRAIN.BATCH_SIZE = cfg.TRAIN.MINI_BATCH_SIZE * 4
    # Evaluation IoU threshold.
    cfg.TRAIN.IOU_THRESHOLD = args.iou_threshold
    # Four dataloader workers per visible GPU.
    gpu_count = torch.cuda.device_count()
    cfg.TRAIN.GPUS = gpu_count
    cfg.TRAIN.WORKERS = gpu_count * 4
    # Optional pretrained-weights path.
    cfg.TRAIN.WEIGHTS = args.weights
    cfg.freeze()
    return cfg
def get_parser():
    """Create the command-line argument parser for built-in model training.

    Returns:
        argparse.ArgumentParser: parser exposing --config-file, --batch-size,
        --iou-threshold, --weights, --resume and --device.
    """
    parser = argparse.ArgumentParser(description="Easydet training for built-in models.")
    parser.add_argument(
        "--config-file",
        default="./configs/YOLOV3.yaml",
        metavar="FILE",
        help="path to config file. (default: ./configs/YOLOV3.yaml)",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=16,
        # BUG FIX: the adjacent string literals previously joined without
        # spaces ("node whenusing ... ParallelEffective"); the rendered
        # --help text now reads correctly.
        help="Mini-batch size, (default: 16) this is the total "
             "batch size of all GPUs on the current node when "
             "using Data Parallel or Distributed Data Parallel. "
             "Effective batch size is batch_size * accumulate."
    )
    parser.add_argument(
        "--iou-threshold",
        type=float,
        default=0.5,
        help="Minimum score for instance predictions to be shown. (default: 0.5).",
    )
    parser.add_argument(
        '--weights',
        type=str,
        default='',
        help='path to weights file. (default: ``).'
    )
    parser.add_argument(
        '--resume',
        action='store_true',
        default=False,
        help='resume training flag.'
    )
    parser.add_argument(
        "--device",
        default="0",
        help="device id (default: ``0``)"
    )
    return parser
def train(cfg):
    """Run the full YOLOv3 training loop described by *cfg*.

    Loads optional pretrained/checkpoint weights, trains for
    cfg.TRAIN.MAX_BATCHES batches with gradient accumulation, multi-scale
    resizing and cosine LR decay, evaluates mAP each epoch (after the first)
    and writes last/best checkpoints under weights/.

    Relies on module-level globals set in the __main__ block: ``device``,
    ``args``, ``tb_writer`` and ``mixed_precision``.
    """
    # Initialize
    init_seeds()
    image_size_min = 6.6 # 320 / 32 / 1.5
    image_size_max = 28.5 # 320 / 32 / 28.5
    if cfg.TRAIN.MULTI_SCALE:
        # Bounds are expressed in multiples of the 32-pixel stride.
        image_size_min = round(cfg.TRAIN.IMAGE_SIZE / 32 / 1.5)
        image_size_max = round(cfg.TRAIN.IMAGE_SIZE / 32 * 1.5)
        image_size = image_size_max * 32 # initiate with maximum multi_scale size
        print(f"Using multi-scale {image_size_min * 32} - {image_size}")
    # Remove previous results
    for files in glob.glob("results.txt"):
        os.remove(files)
    # Initialize model
    model = YOLOv3(cfg).to(device)
    # Optimizer
    optimizer = optim.SGD(model.parameters(),
                          lr=cfg.TRAIN.LR,
                          momentum=cfg.TRAIN.MOMENTUM,
                          weight_decay=cfg.TRAIN.DECAY,
                          nesterov=True)
    # Define the loss function calculation formula of the model
    compute_loss = YoloV3Loss(cfg)
    epoch = 0
    start_epoch = 0
    best_maps = 0.0
    context = None  # last progress-bar line; reused when writing results.txt
    # Dataset
    # Apply augmentation hyperparameters
    train_dataset = VocDataset(anno_file_type=cfg.TRAIN.DATASET, image_size=cfg.TRAIN.IMAGE_SIZE,
                               cfg=cfg)
    # Dataloader
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=cfg.TRAIN.MINI_BATCH_SIZE,
                                  num_workers=cfg.TRAIN.WORKERS,
                                  shuffle=cfg.TRAIN.SHUFFLE,
                                  pin_memory=cfg.TRAIN.PIN_MENORY)
    if cfg.TRAIN.WEIGHTS.endswith(".pth"):
        # Resume from a checkpoint produced by this script.
        state = torch.load(cfg.TRAIN.WEIGHTS, map_location=device)
        # load model
        try:
            # Keep only tensors whose element count matches the current model.
            state["state_dict"] = {k: v for k, v in state["state_dict"].items()
                                   if model.state_dict()[k].numel() == v.numel()}
            model.load_state_dict(state["state_dict"], strict=False)
        except KeyError as e:
            error_msg = f"{cfg.TRAIN.WEIGHTS} is not compatible with {cfg.CONFIG_FILE}. "
            error_msg += f"Specify --weights `` or specify a --config-file "
            error_msg += f"compatible with {cfg.TRAIN.WEIGHTS}. "
            raise KeyError(error_msg) from e
        # load optimizer
        if state["optimizer"] is not None:
            optimizer.load_state_dict(state["optimizer"])
            best_maps = state["best_maps"]
        # load results
        if state.get("training_results") is not None:
            with open("results.txt", "w") as file:
                file.write(state["training_results"]) # write results.txt
        # NOTE(review): "//" binds tighter than "+", so this evaluates as
        # state["batches"] + (1 // len(train_dataloader)) == state["batches"] + 0;
        # the intent was likely (state["batches"] + 1) // len(train_dataloader).
        start_epoch = state["batches"] + 1 // len(train_dataloader)
        del state
    elif len(cfg.TRAIN.WEIGHTS) > 0:
        # possible weights are "*.weights", "yolov3-tiny.conv.15", "darknet53.conv.74" etc.
        load_darknet_weights(model, cfg.TRAIN.WEIGHTS)
    else:
        print("Pre training model weight not loaded.")
    # Mixed precision training https://github.com/NVIDIA/apex
    if mixed_precision:
        # skip print amp info
        model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0)
    # source https://arxiv.org/pdf/1812.01187.pdf
    scheduler = CosineDecayLR(optimizer,
                              max_batches=cfg.TRAIN.MAX_BATCHES,
                              lr=cfg.TRAIN.LR,
                              warmup=cfg.TRAIN.WARMUP_BATCHES)
    # Initialize distributed training
    if device.type != "cpu" and torch.cuda.device_count() > 1 and torch.distributed.is_available():
        dist.init_process_group(backend="nccl", # "distributed backend"
                                # distributed training init method
                                init_method="tcp://127.0.0.1:9999",
                                # number of nodes for distributed training
                                world_size=1,
                                # distributed training node rank
                                rank=0)
        model = torch.nn.parallel.DistributedDataParallel(model)
        model.backbone = model.module.backbone
    # Model EMA
    # TODO: ema = ModelEMA(model, decay=0.9998)
    # Start training
    batches_num = len(train_dataloader) # number of batches
    # 'loss_GIOU', 'loss_Confidence', 'loss_Classification' 'loss'
    results = (0, 0, 0, 0)
    epochs = cfg.TRAIN.MAX_BATCHES // len(train_dataloader)
    print(f"Using {cfg.TRAIN.WORKERS} dataloader workers.")
    print(f"Starting training {cfg.TRAIN.MAX_BATCHES} batches for {epochs} epochs...")
    start_time = time.time()
    for epoch in range(start_epoch, epochs):
        model.train()
        # init batches
        batches = 0
        mean_losses = torch.zeros(4)
        print("\n")
        print(("%10s" * 7) % ("Batch", "memory", "GIoU", "conf", "cls", "total", " image_size"))
        progress_bar = tqdm(enumerate(train_dataloader), total=batches_num)
        for index, (images, small_label_bbox, medium_label_bbox, large_label_bbox,
                    small_bbox, medium_bbox, large_bbox) in progress_bar:
            # number integrated batches (since train start)
            batches = index + len(train_dataloader) * epoch
            scheduler.step(batches)
            images = images.to(device)
            small_label_bbox = small_label_bbox.to(device)
            medium_label_bbox = medium_label_bbox.to(device)
            large_label_bbox = large_label_bbox.to(device)
            small_bbox = small_bbox.to(device)
            medium_bbox = medium_bbox.to(device)
            large_bbox = large_bbox.to(device)
            # Hyper parameter Burn-in
            if batches <= cfg.TRAIN.WARMUP_BATCHES:
                for m in model.named_modules():
                    if m[0].endswith('BatchNorm2d'):
                        # False throughout warmup; becomes True exactly on the
                        # last warmup batch.
                        m[1].track_running_stats = batches == cfg.TRAIN.WARMUP_BATCHES
            # Run model
            pred, raw = model(images)
            # Compute loss
            loss, loss_giou, loss_conf, loss_cls = compute_loss(pred,
                                                                raw,
                                                                small_label_bbox,
                                                                medium_label_bbox,
                                                                large_label_bbox,
                                                                small_bbox,
                                                                medium_bbox,
                                                                large_bbox)
            # Compute gradient
            if mixed_precision:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            # Optimize accumulated gradient
            # NOTE(review): "%" and "//" have equal precedence (left-assoc), so
            # this parses as (batches % BATCH_SIZE) // MINI_BATCH_SIZE == 0;
            # the accumulation step was likely meant to be
            # batches % (BATCH_SIZE // MINI_BATCH_SIZE) == 0 -- confirm.
            if batches % cfg.TRAIN.BATCH_SIZE // cfg.TRAIN.MINI_BATCH_SIZE == 0:
                optimizer.step()
                optimizer.zero_grad()
                # TODO: ema.update(model)
            # Print batch results
            # update mean losses
            loss_items = torch.tensor([loss_giou, loss_conf, loss_cls, loss])
            mean_losses = (mean_losses * index + loss_items) / (index + 1)
            # NOTE(review): torch.cuda.memory_cached() is deprecated in newer
            # torch releases (renamed memory_reserved) -- confirm torch version.
            memory = f"{torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0:.2f}G"
            context = ("%10s" * 2 + "%10.3g" * 5) % (
                "%g/%g" % (batches + 1, cfg.TRAIN.MAX_BATCHES), memory, *mean_losses,
                train_dataset.image_size)
            progress_bar.set_description(context)
            # Multi-Scale training
            if cfg.TRAIN.MULTI_SCALE:
                # adjust img_size (67% - 150%) every 10 batch size
                if batches % cfg.TRAIN.RESIZE_INTERVAL == 0:
                    train_dataset.image_size = random.randrange(image_size_min,
                                                                image_size_max + 1) * 32
            # Write Tensorboard results
            if tb_writer:
                # 'loss_GIOU', 'loss_Confidence', 'loss_Classification' 'loss'
                titles = ["GIoU", "Confidence", "Classification", "Train loss"]
                for xi, title in zip(list(mean_losses) + list(results), titles):
                    tb_writer.add_scalar(title, xi, index)
        # Process epoch results
        # TODO: ema.update_attr(model)
        final_epoch = epoch + 1 == epochs
        # Calculate mAP
        # skip first epoch
        maps = 0.
        if epoch > 0:
            # Uses the module-level ``args`` from the __main__ block.
            maps = evaluate(cfg, args)
        # Write epoch results
        with open("results.txt", "a") as f:
            # 'loss_GIOU', 'loss_Confidence', 'loss_Classification' 'loss', 'maps'
            f.write(context + "%10.3g" * 1 % maps)
            f.write("\n")
        # Update best mAP
        if maps > best_maps:
            best_maps = maps
        # Save training results
        with open("results.txt", 'r') as f:
            # Create checkpoint
            state = {'batches': batches,
                     'best_maps': maps,
                     'training_results': f.read(),
                     'state_dict': model.state_dict(),
                     'optimizer': None
                     if final_epoch else optimizer.state_dict()}
        # Save last checkpoint
        torch.save(state, "weights/checkpoint.pth")
        # Save best checkpoint
        if best_maps == maps:
            state = {'batches': -1,
                     'best_maps': None,
                     'training_results': None,
                     'state_dict': model.state_dict(),
                     'optimizer': None}
            torch.save(state, "weights/model_best.pth")
        # Delete checkpoint
        del state
    print(f"{epoch - start_epoch} epochs completed "
          f"in {(time.time() - start_time) / 3600:.3f} hours.\n")
    # Conditional expression used purely for its side effect.
    dist.destroy_process_group() if torch.cuda.device_count() > 1 else None
    torch.cuda.empty_cache()
if __name__ == "__main__":
    # "spawn" avoids state inherited via fork in dataloader worker processes.
    mp.set_start_method("spawn", force=True)
    args = get_parser().parse_args()
    cfg = setup_cfg(args)
    # --resume overrides any explicit --weights with the last checkpoint.
    args.weights = "weights/checkpoint.pth" if args.resume else args.weights
    print(args)
    device = select_device(args.device, apex=mixed_precision)
    if device.type == "cpu":
        mixed_precision = False  # Apex AMP requires CUDA.
    # Ensure the checkpoint directory exists (ignore "already exists").
    try:
        os.makedirs("weights")
    except OSError:
        pass
    # Start Tensorboard with "tensorboard --logdir=runs"
    # BUG FIX: tb_writer must exist even when tensorboard is unavailable,
    # otherwise train() raises NameError at `if tb_writer:`. None disables
    # logging since train() only checks truthiness.
    tb_writer = None
    try:
        from torch.utils.tensorboard import SummaryWriter
        tb_writer = SummaryWriter()
    except Exception:
        pass
    train(cfg)
| StarcoderdataPython |
6577695 | <filename>collecting users/fake-user_collector.py
# -*- coding: utf-8 -*-
"""
Created on Mar 15 2020
@author: GerH
"""
from selenium import webdriver
import time
from datetime import datetime
import threading
import sys
from random import randint
#this thread takes fake user data from the website and puts it into a csv file
def collector(users_num, url_part, thread_num):
    """Scrape *users_num* fake identities from fakenamegenerator.com.

    Repeatedly loads the generator page for *url_part* (a name-set/country
    combination), parses the identity fields from the DOM, derives a fake
    Austrian-style SVNR and appends one quoted-semicolon CSV line per user to
    users_db/userdata_<url_part>-<thread_num>_<timestamp>.csv.
    """
    #measure time
    start_time = time.time()
    print("starting up..." + str(start_time) + ", " + datetime.now().strftime("%d-%m-%Y_%H-%M-%S"))
    #current url (with names-country combo)
    url = "https://www.fakenamegenerator.com/gen-random-"+url_part+".php"
    #save time & resources by not loading images
    firefox_profile = webdriver.FirefoxProfile()
    firefox_profile.set_preference('permissions.default.image', 2)
    firefox_profile.set_preference('extensions.contentblocker.enabled', True)
    firefox_profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
    driver = webdriver.Firefox(firefox_profile=firefox_profile)
    #create & open csv file
    f = open("users_db/userdata_"+url_part+"-"+str(thread_num)+"_"+datetime.now().strftime("%d-%m-%Y_%H-%M-%S")+".csv","a")
    users_collected = 0
    while(users_collected<users_num):
        driver.get(url)
        try:
            #output progress
            if(users_collected % 10 == 0):
                print(url_part + " " + str(thread_num) + ": " + str(users_collected)+"/"+str(users_num))
            #uncomment if necessary
            #sleep(1)
            #find data on website
            address = driver.find_element_by_class_name("address")
            entries = driver.find_elements_by_class_name("dl-horizontal")
            address_entries= address.text.split("\n")
            # Address block: line 0 = full name, line 1 = street, line 2 = area.
            name_arr = address_entries[0].rsplit(' ', 1)
            street_arr = address_entries[1].rsplit(' ', 1)
            location_arr = address_entries[2].split(' ', 1)
            #extract user data
            first_name = name_arr[0]
            last_name = name_arr[1]
            street_name = street_arr[0]
            street_num = street_arr[1]
            area_code = location_arr[0]
            area_name = location_arr[1]
            # Each dl-horizontal entry is "label\nvalue"; take the value.
            mother_maiden_name = entries[0].text.split("\n")[1]
            geo_coord = entries[1].text.split("\n")[1]
            phone_num = entries[2].text.split("\n")[1]
            country_code = entries[3].text.split("\n")[1]
            birthday = entries[4].text.split("\n")[1]
            age = entries[5].text.split("\n")[1]
            zodiac_sign = entries[6].text.split("\n")[1]
            email = entries[7].text.split("\n")[1].replace("\nThis is a real email address. Click here to activate it!","")
            username = entries[8].text.split("\n")[1]
            password = entries[9].text.split("\n")[1]
            website = entries[10].text.split("\n")[1]
            browser_user_agent = entries[11].text.split("\n")[1]
            card_number = entries[12].text.split("\n")[1]
            card_expiry = entries[13].text.split("\n")[1]
            card_cvc2 = entries[14].text.split("\n")[1]
            company = entries[15].text.split("\n")[1]
            occupation = entries[16].text.split("\n")[1]
            height = entries[17].text.split("\n")[1].replace("\"","''")
            weight = entries[18].text.split("\n")[1]
            blood_type = entries[19].text.split("\n")[1]
            ups_tracking_num = entries[20].text.split("\n")[1]
            western_union_num = entries[21].text.split("\n")[1]
            money_gram_num = entries[22].text.split("\n")[1]
            fav_color = entries[23].text.split("\n")[1]
            vehicle = entries[24].text.split("\n")[1]
            guid = entries[25].text.split("\n")[1]
            # SVNR parts derived from the birthday string "Month Day, Year".
            svnr_year = birthday[-2:]
            svnr_month = birthday.replace(",","").split(" ")[0]
            svnr_day = birthday.replace(",","").split(" ")[1]
            # Map the English month name to its number (fallback: 10).
            if svnr_month == "January":
                svnr_month = 1
            elif svnr_month == "February":
                svnr_month = 2
            elif svnr_month == "March":
                svnr_month = 3
            elif svnr_month == "April":
                svnr_month = 4
            elif svnr_month == "May":
                svnr_month = 5
            elif svnr_month == "June":
                svnr_month = 6
            elif svnr_month == "July":
                svnr_month = 7
            elif svnr_month == "August":
                svnr_month = 8
            elif svnr_month == "September":
                svnr_month = 9
            elif svnr_month == "October":
                svnr_month = 10
            elif svnr_month == "November":
                svnr_month = 11
            elif svnr_month == "December":
                svnr_month = 12
            else:
                print("ERROR!!!")
                svnr_month = 10
            # Random 4-digit serial followed by DDMMYY.
            svnr = randint(0, 9999)*1000000 + int(svnr_day)*10000 + svnr_month*100 + int(svnr_year)
            #print("\nBday: "+birthday)
            #print("svnr: "+str(svnr))
            svnr_str = str(svnr)
            # NOTE(review): this pads at most four leading zeros, so values
            # below 100000 still come out shorter than 10 digits -- confirm
            # whether zfill(10) was intended.
            if(svnr<1000000):
                svnr_str = "0"+svnr_str
            if(svnr<10000000):
                svnr_str = "0"+svnr_str
            if(svnr<100000000):
                svnr_str = "0"+svnr_str
            if(svnr<1000000000):
                svnr_str = "0"+svnr_str
            # Translate the zodiac sign to German, build the CSV line and append it.
            zodiac_sign=zodiac_sign.replace("Cancer","Krebs").replace("Taurus","Stier").replace("Pisces","Fische").replace("Aries","Widder").replace("Libra","Waage").replace("Aquarius","Wassermann").replace("Capricorn","Steinbock").replace("Scorpio","Skorpion").replace("Virgo","Jungfrau").replace("Sagittarius","Schütze").replace("Gemini","Zwillinge").replace("Leo","Löwe")
            csv_line = "\"" + svnr_str + "\";\"" + first_name + "\";\"" + last_name + "\";\"" + street_name + "\";\"" + street_num + "\";\"" + area_code + "\";\"" + area_name + "\";\"" + geo_coord + "\";\"" + phone_num + "\";\"" + country_code + "\";\"" + birthday + "\";\"" + age + "\";\"" + mother_maiden_name + "\";\"" + zodiac_sign + "\";\"" + email + "\";\"" + username + "\";\"" + password + "\";\"" + website + "\";\"" + browser_user_agent + "\";\"" + card_number + "\";\"" + card_expiry + "\";\"" + card_cvc2 + "\";\"" + company + "\";\"" + occupation + "\";\"" + height + "\";\"" + weight + "\";\"" + blood_type + "\";\"" + ups_tracking_num + "\";\"" + western_union_num + "\";\"" + money_gram_num + "\";\"" + fav_color + "\";\"" + vehicle + "\";\"" + guid + "\"\n"
            f.write(csv_line)
            users_collected+=1
        except:
            # NOTE(review): bare except retries the page on ANY failure,
            # including KeyboardInterrupt -- consider narrowing.
            e = sys.exc_info()[0]
            print( "Couldn't load data from "+url+": \n%s" % e )
    #close browser window & file, and show user how long the process took to finish
    f.close()
    driver.close()
    end_time = time.time()
    elapsed_time = end_time - start_time
    print(url_part + " thread elapsed time: " + str(elapsed_time) + "(" + str(start_time) + "-" + str(end_time) + ")")
#start threads
# Launch ten parallel collector threads (10000 users each, all scraping the
# "gr-as" generator page), replacing ten copy-pasted q..z thread blocks.
collector_threads = []
for thread_num in range(1, 11):
    worker = threading.Thread(target=collector, args=(10000, "gr-as", thread_num))
    worker.start()
    collector_threads.append(worker)
| StarcoderdataPython |
5079552 | <reponame>0xdc/wk<filename>wk/models.py
from __future__ import unicode_literals
from django.db import models
class WellKnown(models.Model):
    """A single key/value entry served from the well-known namespace."""

    # Lookup key; uniqueness is enforced at the database level.
    key = models.CharField(max_length=255, unique=True)
    # Arbitrary payload returned for the key.
    value = models.TextField()

    def __str__(self):
        """Render as the key (used by the admin and shell reprs)."""
        return self.key
| StarcoderdataPython |
197839 | # -*- coding: utf-8 -*-
"""cerberus_ac plugins module."""
try:
    # NOTE(review): Argument is imported but not used in this module.
    from archan import Provider, Argument, DesignStructureMatrix
    class Privileges(Provider):
        """Cerberus AC provider for Archan."""
        identifier = 'cerberus_ac.Privileges'
        name = 'Privileges'
        description = 'Provide matrix data about privileges in an access ' \
                      'control scheme.'
        def get_dsm(self):
            """
            Provide matrix data about privileges in an access control scheme.
            Returns:
                archan.DSM: instance of archan DSM.
            """
            # Placeholder implementation: returns an empty matrix
            # (no data rows, no entity keys).
            data = []
            keys = []
            return DesignStructureMatrix(data=data, entities=keys)
except ImportError:
    # Fallback stub so `from cerberus_ac.plugins import Privileges` still
    # works when archan is not installed.
    class Privileges(object):
        """Empty cerberus_ac provider."""
| StarcoderdataPython |
8189226 | from .data_structures import ASPECTDataset # noqa: F401; noqa: F401
from .data_structures import ASPECTUnstructuredIndex # noqa: F401
from .data_structures import ASPECTUnstructuredMesh # noqa: F401
from .fields import ASPECTFieldInfo # noqa: F401
from .io import IOHandlerASPECT # noqa: F401
| StarcoderdataPython |
9671157 | <reponame>betaredex/pfmisc<gh_stars>0
#!/usr/bin/env python3.5
import sys
import os
import json
import pudb
try:
from ._colors import Colors
from .debug import debug
from .C_snode import *
from .error import *
except:
from _colors import Colors
from debug import debug
from C_snode import *
from error import *
class someOtherClass():
    """Demonstration class showing debug output issued from a second class."""

    def __init__(self, *args, **kwargs):
        """Create a debug printer scoped to this class."""
        self.dp = debug(verbosity=0, level=-1, within="someOtherClass")

    def say(self, msg):
        """Print a short explanation, then emit *msg* via the debug printer."""
        intro_lines = (
            '\n* Now we are in a different class in this module...',
            '* Note the different class and method in the debug output.',
            '* calling: self.dp.qprint(msg):',
        )
        for line in intro_lines:
            print(line)
        self.dp.qprint(msg)
class pfmisc():
    """
    Example of how to use the local misc dependencies
    """
    # An error declaration block
    # Maps a symbolic error name to the action attempted, the error text and
    # the process exit code; consumed by error.warn()/error.fatal().
    _dictErr = {
        'someError1' : {
            'action' : 'trying to parse image file specified, ',
            'error' : 'wrong format found. Must be [<index>:]<filename>',
            'exitCode' : 1},
        'someError2': {
            'action' : 'trying to read input <tagFileList>, ',
            'error' : 'could not access/read file -- does it exist? Do you have permission?',
            'exitCode' : 20
        }
    }
    def col2_print(self, str_left, str_right):
        # Print a two-column row: left column white (width self.LC),
        # right column light blue (width self.RC).
        print(Colors.WHITE +
              ('%*s' % (self.LC, str_left)), end='')
        print(Colors.LIGHT_BLUE +
              ('%*s' % (self.RC, str_right)) + Colors.NO_COLOUR)
    def __init__(self, *args, **kwargs):
        """
        Holder for constructor of class -- allows for explicit setting
        of member 'self' variables.
        :return:
        """
        # Column widths used by col2_print.
        self.LC = 40
        self.RC = 40
        self.args = None
        self.str_desc = 'pfmisc'
        self.str_name = self.str_desc
        self.str_version = ''
        # Console debug printer.
        self.dp = debug(verbosity = 0,
                        level = -1,
                        within = 'pfmisc')
        # Second debug printer that mirrors output to a file.
        self.dp2 = debug(verbosity = 0,
                         level = -1,
                         within = 'pfmisc',
                         debugToFile = True,
                         debugFile = '/tmp/pfmisc.txt')
    def demo(self, *args, **kwargs):
        """
        Simple run method
        """
        # Walk through the debug-printer features: plain qprint, file-backed
        # qprint, tee-to-file qprint, output from a second class, comms tags
        # and finally a warn() example using _dictErr above.
        print('* calling: self.dp.qprint("Why hello there, world!"):')
        self.dp.qprint("Why hello there, world!")
        print('* calling: self.dp2.qprint("Why hello there, world! In a debugging file!"):')
        self.dp2.qprint("Why hello there, world! In a debugging file!")
        print('* Check on /tmp/pfmisc.txt')
        print('* calling: self.dp.qprint("Why hello there, world! With teeFile!", teeFile="/tmp/pfmisc-teefile.txt", teeMode = "w+"):')
        self.dp.qprint("Why hello there, world! With teeFile!", teeFile="/tmp/pfmisc-teefile.txt", teeMode = "w+")
        print('* Check on /tmp/pfmisc-teefile.txt')
        other = someOtherClass()
        other.say("And this is from a different class")
        for str_comms in ['status', 'error', 'tx', 'rx']:
            print('\n* calling: self.dp.qprint("This string is tagged with %s" % str_comms, ', end='')
            print("comms = '%s')" % str_comms)
            self.dp.qprint("This string is tagged with '%s'" % str_comms, comms = str_comms)
        print("And here is warning...")
        warn(
            self, 'someError1',
            header = 'This is only a warning!',
            drawBox = True
        )
# Execute every doctest example embedded in the todo module's docstrings;
# failures are reported on stdout (silent when all examples pass).
import doctest
import todo
doctest.testmod(todo)
| StarcoderdataPython |
3391391 | import os
import time
from kombu import Connection
from kombu import Exchange
from kombu import Producer
from utils import get_logger
from utils import RABBITMQ_URI
# Module-level logger shared by the Scheduler service and its Worker.
log = get_logger()
class Scheduler:
    """RabbitMQ clock service: periodically publishes bulk-operation pulses."""
    def run(self):
        """
        Entry function for this service that runs a RabbitMQ worker through Kombu.
        """
        try:
            with Connection(RABBITMQ_URI) as connection:
                # Worker's constructor blocks: it enters an infinite publish loop.
                self.worker = self.Worker(connection)
        except Exception:
            log.exception("exception")
        except KeyboardInterrupt:
            # KeyboardInterrupt derives from BaseException, so it bypasses the
            # Exception handler above and lands here for a quiet shutdown.
            pass
        finally:
            log.info("stopped")
    class Worker:
        """Publishes a 'pulse' message on the db-clock exchange at a fixed interval."""
        def __init__(self, connection):
            self.connection = connection
            # Time in secs to gather entries to perform a bulk operation
            self.time_to_wait = float(os.getenv("BULK_TIMER", 1))
            self.db_clock_exchange = Exchange(
                "db-clock",
                type="direct",
                channel=connection,
                durable=False,
                delivery_mode=1,  # transient (non-persistent) messages
            )
            self.db_clock_exchange.declare()
            log.info("started")
            # NOTE: never returns -- enters the infinite publish loop below.
            self._db_clock_send()
        def _db_clock_send(self):
            # Publish one 'bulk_operation' pulse every time_to_wait seconds, forever.
            with Producer(self.connection) as producer:
                while True:
                    time.sleep(self.time_to_wait)
                    producer.publish(
                        {"op": "bulk_operation"},
                        exchange=self.db_clock_exchange,
                        routing_key="pulse",
                        retry=True,
                        priority=3,
                        serializer="ujson",
                    )
def run():
    """Instantiate the Scheduler service and block on its worker loop."""
    Scheduler().run()


if __name__ == "__main__":
    run()
| StarcoderdataPython |
10569 | #!/usr/bin/python
import sys
import re
def iptohex(ip):
    """Convert a dotted-quad IPv4 string to an 8-character lowercase hex string.

    Each octet is rendered as exactly two hex digits,
    e.g. '10.0.0.1' -> '0a000001'.
    """
    # '%02x' zero-pads each octet to two digits, replacing the previous
    # manual "prepend '0' when < 16" padding logic.
    return ''.join('%02x' % int(octet) for octet in ip.split('.'))
def main():
    """Validate argv, convert the given IPv4 address and print both forms."""
    if (len(sys.argv) != 2):
        print('Usage: ./iptohex.py x.x.x.x')
        sys.exit(1)
    ip = sys.argv[1]
    # Reject anything other than digits and dots before converting.
    invalidInput = re.search(r'[^0-9\.]', ip)
    if invalidInput:
        print('Usage: ./iptohex.py x.x.x.x')
        # BUG FIX: previously fell through and crashed inside iptohex();
        # exit with an error status instead.
        sys.exit(1)
    hex_ip = iptohex(ip)
    print("Hex IP: %s " % (hex_ip))
    print("Decimal IP: %s" % (ip))

if __name__ == '__main__':
    main()
| StarcoderdataPython |
1920614 | <filename>nemoobot/bot/antispam.py
import re
from typing import Tuple
# Warning texts (Russian) sent back to chat when a moderation rule triggers.
CAPS_WARNING_MESSAGE = 'Calm down! БЕЗ КАПСА ТУТ!'
URLS_WARNING_MESSAGE = 'Ссылки в чате запрещены.'
BANNED_WORD_WARNING_MESSAGE = 'Аккуратнее с выражениями.'
class AntiSpam:
    """Chat message filter flagging caps-lock shouting, URLs and banned words.

    All checks are skipped unless ``is_active`` is true. ``banned_words`` is
    expected to be a set (or None), because membership is tested with ``&``.
    """

    def __init__(self, is_active=False, caps=False, urls=False, banned_words=None):
        self.is_active = is_active
        self.caps = caps
        self.urls = urls
        self.banned_words = banned_words
        # Pre-compiled patterns: word tokens and a loose URL/domain matcher.
        self.word_pattern = re.compile(r'\w+')
        self.url_pattern = re.compile(
            r'((?:(?:[a-z])*:(?:\/\/)*)*(?:www\.)*(?:[a-zA-Z0-9_\.]*(?:@)?)?[a-z]+\.(?:ru|net|com|ua|uk|cn))'
        )

    def check_message(self, message) -> Tuple[bool, str]:
        """Return (violated, warning_text); (False, '') when the message is clean.

        Rules are evaluated in fixed priority: caps, then URLs, then banned words.
        """
        if self.is_active:
            if self._check_is_upper(message):
                return True, CAPS_WARNING_MESSAGE
            if self._check_urls(message):
                return True, URLS_WARNING_MESSAGE
            if self._check_banned_words(message):
                return True, BANNED_WORD_WARNING_MESSAGE
        return False, ''

    def _check_is_upper(self, message: str) -> bool:
        """True when the caps rule is enabled and the whole message is upper-case."""
        # TODO use regexp to flag messages that are only *mostly* upper-case.
        # str.isupper() is False for strings without any cased character,
        # so digits/punctuation-only messages are not flagged.
        # BUG FIX: helpers previously fell through returning None implicitly;
        # they now always return an explicit bool.
        return bool(self.caps and message.isupper())

    def _check_banned_words(self, message: str) -> bool:
        """True when any banned word occurs in the (lower-cased) message."""
        if not self.banned_words:
            return False
        words = re.findall(self.word_pattern, message.lower())
        return bool(self.banned_words & set(words))

    def _check_urls(self, message: str) -> bool:
        """True when the URL rule is enabled and the message contains a URL/domain."""
        return bool(self.urls and re.findall(self.url_pattern, message))
| StarcoderdataPython |
9669458 | <reponame>winnerineast/taichi
import taichi as ti
import matplotlib.pyplot as plt
import math
import sys
# Taichi global fields: per-timestep state plus scalar parameters.
x = ti.global_var(dt=ti.f32)  # position at each timestep
v = ti.global_var(dt=ti.f32)  # velocity at each timestep
a = ti.global_var(dt=ti.f32)  # constant acceleration (scalar)
loss = ti.global_var(dt=ti.f32)  # differentiable loss (final position)
damping = ti.global_var(dt=ti.f32)  # per-step velocity damping factor
max_timesteps = 1024 * 1024
dt = 0.001
@ti.layout
def place():
    # Dense 1-D layout over timesteps for x and v; scalars live at the root.
    ti.root.dense(ti.i, max_timesteps).place(x, v)
    ti.root.place(a, damping, loss)
    # Allocate gradient fields for autodiff (used by ti.Tape below).
    ti.root.lazy_grad()
@ti.kernel
def advance(t: ti.i32):
    # One damped Euler step: decay previous velocity, add acceleration,
    # then integrate position.
    v[t] = damping[None] * v[t - 1] + a[None]
    x[t] = x[t - 1] + dt * v[t]
@ti.kernel
def compute_loss(t: ti.i32):
    # The loss is simply the position at the final timestep.
    loss[None] = x[t]
def gradient(alpha, num_steps):
    # Map the damping rate alpha to a per-step multiplicative factor.
    damping[None] = math.exp(-dt * alpha)
    a[None] = 1
    # Record the simulation on the autodiff tape so loss gradients exist.
    with ti.Tape(loss):
        for i in range(1, num_steps):
            advance(i)
        compute_loss(num_steps - 1)
    # NOTE(review): despite the function name, this returns the loss value
    # itself, not a derivative -- confirm intended semantics.
    return loss[None]
# Any command-line argument switches to the long (10x) horizon sweep.
large = False
if len(sys.argv) > 1:
    large = True
# c = ['r', 'g', 'b', 'y', 'k']
# Sweep damping rates and plot loss versus simulated horizon length.
for i, alpha in enumerate([0, 1, 3, 10]):
    xs, ys = [], []
    grads = []
    for num_steps in range(0, 10000 if large else 1000, 50):
        g = gradient(alpha, num_steps)
        xs.append(num_steps)
        ys.append(g)
    plt.plot(xs, ys, label="damping={}".format(alpha))
# plt.loglog()
fig = plt.gcf()
fig.set_size_inches(5, 3)
plt.title("Gradient Explosion without Damping")
plt.ylabel("Gradient")
plt.xlabel("Time steps")
plt.legend()
if large:
    plt.ylim(0, 3000)
plt.tight_layout()
plt.show()
| StarcoderdataPython |
6492499 | <reponame>GenBill/notebooks
import random
import numpy as np
import torch
import torch.nn as nn
import torchvision
from IPython import display
from torch.utils import data
from torchvision import transforms
from matplotlib import pyplot as plt
# Download (if needed) and load the Fashion-MNIST train/test splits as tensors.
mnist_train = torchvision.datasets.FashionMNIST(root='./Datasets/FashionMNIST', train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(root='./Datasets/FashionMNIST', train=False, download=True, transform=transforms.ToTensor())
print(type(mnist_train))
print(len(mnist_train),len(mnist_test))
# Access a single sample:
feature,label = mnist_train[0]
print(feature.shape,label)
# Convert numeric labels to text labels
def get_fashion_mnist_labels(labels):
    """Map numeric Fashion-MNIST class indices to their text names."""
    class_names = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    return [class_names[int(index)] for index in labels]
def show_fashion_mnist(images, labels):
    """Display a row of 28x28 image tensors with their text labels."""
    # One row of subplots, one per image; the Figure return value is unused.
    _, axes = plt.subplots(1, len(images), figsize=(14, 14))
    for axis, image, label in zip(axes, images, labels):
        axis.imshow(image.view((28, 28)).numpy())
        axis.set_title(label)
        # Hide both rulers: these are pictures, not plots.
        axis.axes.get_xaxis().set_visible(False)
        axis.axes.get_yaxis().set_visible(False)
    plt.show()
# Collect the first ten samples and display them with their text labels.
x, y = [], []
for i in range(10):
    x.append(mnist_train[i][0])
    y.append(mnist_train[i][1])
show_fashion_mnist(x, get_fashion_mnist_labels(y))
print(type(x))
# Define the model dimensions
num_inputs=784 #28*28 image pixel count (flattened)
num_outputs=10
num_hiddens=256
class Mymodel(nn.Module):
    """Two-layer MLP classifier: flatten -> Linear -> ReLU -> Linear."""

    def __init__(self, num_inputs, num_hiddens, num_outputs):
        super().__init__()
        self.linear = nn.Sequential(
            nn.Linear(num_inputs, num_hiddens),
            nn.ReLU(),
            nn.Linear(num_hiddens, num_outputs),
        )

    def forward(self, x):
        # x arrives as (batch, 1, 28, 28); flatten each sample to one row.
        # view(batch, -1) infers the feature dimension automatically.
        flattened = x.view(x.shape[0], -1)
        return self.linear(flattened)
net = Mymodel(num_inputs, num_hiddens, num_outputs)
#net = torch.nn.DataParallel(net)
print(net)
# Initialize all parameters with a small Gaussian
for params in net.parameters():
    nn.init.normal_(params, mean=0, std=0.01)
batch_size = 256
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False)
print(len(train_iter))
## Pause here!
def evaluate_accuracy(data_iter, net):
    """Return the classification accuracy of *net* over all batches in *data_iter*."""
    correct, total = 0.0, 0
    for features, targets in data_iter:
        predictions = net(features).argmax(dim=1)
        correct += (predictions == targets).float().sum().item()
        total += targets.shape[0]
    return correct / total
# Defined as a function because the training loop is reused repeatedly.
def train_model(net, train_iter, test_iter, loss,num_epochs,
              params=None, lr=None, optimizer=None):
    """Train *net* for *num_epochs*, printing loss and train/test accuracy.

    Either pass an *optimizer*, or pass *params* and *lr* for manual SGD.
    """
    print('num_epochs:',num_epochs)
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        # train_l_sum: running sum of the loss
        # train_acc_sum: running count of correct predictions
        for x, y in train_iter:
            # y_hat: model predictions for this batch
            y_hat = net(x)
            # Compute the loss
            l = loss(y_hat, y).sum()
            # Zero the gradients
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            if optimizer is None:
                # NOTE(review): d2l is never imported in this file; this
                # branch raises NameError if reached -- confirm the intended
                # SGD helper before relying on the manual-params path.
                d2l.sgd(params, lr, batch_size)
            else:
                optimizer.step()  # Used by the concise multilayer-perceptron implementation
            train_l_sum += l.item()
            # argmax picks the highest-scoring class, i.e. the prediction
            train_acc_sum += (y_hat.argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
batch_size = 256
loss = torch.nn.CrossEntropyLoss()#.cuda()
# Needs a closer look
# NOTE(review): lr=0.5 is a large step size for SGD -- kept as-is.
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
num_epochs = 5
train_model(net, train_iter, test_iter, loss, num_epochs, None, None, optimizer)
| StarcoderdataPython |
8137986 | # <NAME>
# 2/25/2021
# selenium imports
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
# scheduling tasks
import schedule
import time
# import other scripts in directory
import settings
import functions
userAgent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36'
# setting up chrome driver
chromeOptions = Options()
chromeOptions.add_argument('--no-sandbox')
chromeOptions.add_argument('--disable-dev-shm-usage')
chromeOptions.add_argument("--window-size=1920,1080")
chromeOptions.add_argument('--ignore-certificate-errors')
chromeOptions.add_argument('--allow-running-insecure-content')
chromeOptions.add_argument(f'user-agent={userAgent}')
driver = webdriver.Chrome('./chromedriver', options=chromeOptions)
previousGrades = {}
functions.login(driver)
functions.getInitialGrades(driver, settings.classes, previousGrades)
functions.logout(driver)
def run():
functions.login(driver)
functions.checkAndUpdateGrades(driver, previousGrades)
functions.logout(driver)
while True:
run()
time.sleep(settings.REFRESH_INTERVAL * 60)
| StarcoderdataPython |
3444278 | <reponame>tdiprima/code
# SomeSound.py
audio=file('/dev/dsp', 'wb')
def main():
for a in range(0,25,1):
for b in range(15,112,1):
for c in range(0,1,1):
audio.write(chr(127+b)+chr(127+b)+chr(127+b)+chr(127+b)+chr(127-b)+chr(127-b)+chr(127-b)+chr(127-b))
for b in range(112,15,-1):
for c in range(0,1,1):
audio.write(chr(127+b)+chr(127+b)+chr(127+b)+chr(127+b)+chr(127-b)+chr(127-b)+chr(127-b)+chr(127-b))
main()
audio.close()
# The modified code below, that you can experiment with. Be aware of wordwrapping, etc...
# SomeSound.py
audio=file('/dev/dsp', 'wb')
def main():
# "a" is unimportant!
for a in range(0,25,1):
f=15
g=112
h=1
i=0
j=1
k=1
l=112
m=15
n=-1
o=0
p=1
q=1
for b in range(f,g,h):
for c in range(i,j,k):
audio.write(chr(127+b)+chr(127+b)+chr(127+b)+chr(127+b)+chr(127-b)+chr(127-b)+chr(127-b)+chr(127-b))
for d in range(l,m,n):
for e in range(o,p,q):
audio.write(chr(127+d)+chr(127+d)+chr(127+d)+chr(127+d)+chr(127-d)+chr(127-d)+chr(127-d)+chr(127-d))
main()
audio.close()
| StarcoderdataPython |
1611797 | # -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Workdir class."""
import glob
import json
import logging
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from neural_compressor.ux.utils.templates.metric import Metric
from neural_compressor.ux.utils.utils import _load_json_as_dict
from neural_compressor.ux.utils.workload.workload import Workload
from neural_compressor.ux.utils.workload.workloads_list import WorkloadInfo
class Workdir:
    """Persistence helper for the workloads registry and per-workload dirs.

    Wraps ``~/.neural_compressor/workloads_list.json`` (the registry that
    backs the get_workloads_list endpoint) and resolves the on-disk
    ``workload_path`` of a single workload identified by ``request_id``.
    """

    def __init__(
        self,
        request_id: Optional[str] = None,
        project_name: Optional[str] = None,
        model_path: Optional[str] = None,
        input_precision: Optional[str] = None,
        model_output_path: Optional[str] = None,
        output_precision: Optional[str] = None,
        mode: Optional[str] = None,
        metric: Optional[Union[dict, Metric]] = None,
        overwrite: bool = True,
        created_at: Optional[str] = None,
        supports_profiling: Optional[bool] = None,
    ) -> None:
        """Initialize workdir class."""
        self.workspace_path = os.path.join(os.environ.get("HOME", ""), "workdir")
        self.workdir_path = os.path.join(os.environ.get("HOME", ""), ".neural_compressor")
        self.ensure_working_path_exists()
        self.workloads_json = os.path.join(self.workdir_path, "workloads_list.json")
        self.request_id = request_id
        # Declared here but only assigned below when it can be resolved, so it
        # may be missing on instances created without request_id/workspace.
        self.workload_path: str
        if not metric:
            metric = Metric()
        if os.path.isfile(self.workloads_json):
            self.workloads_data = self.load()
        else:
            # First run: start a fresh, versioned registry.
            self.workloads_data = {
                "active_workspace_path": self.workspace_path,
                "workloads": {},
                "version": "4",
            }
        workload_data = self.get_workload_data(request_id)
        if workload_data:
            # Known workload: reuse its recorded location.
            self.workload_path = workload_data.get("workload_path", "")
        elif self.workspace_path and request_id:
            # New workload: derive "<model stem>_<request id>" under workspace.
            workload_name = request_id
            if model_path:
                workload_name = "_".join(
                    [
                        Path(model_path).stem,
                        request_id,
                    ],
                )
            self.workload_path = os.path.join(
                self.workspace_path,
                "workloads",
                workload_name,
            )
        if request_id and overwrite:
            self.update_data(
                request_id=request_id,
                project_name=project_name,
                model_path=model_path,
                input_precision=input_precision,
                model_output_path=model_output_path,
                output_precision=output_precision,
                mode=mode,
                metric=metric,
                created_at=created_at,
                supports_profiling=supports_profiling,
            )

    def load(self) -> dict:
        """Load the workloads registry from its JSON file into memory."""
        logging.info(f"Loading workloads list from {self.workloads_json}")
        with open(self.workloads_json, encoding="utf-8") as workloads_list:
            self.workloads_data = json.load(workloads_list)
        return self.workloads_data

    def dump(self) -> None:
        """Write the in-memory workloads registry back to its JSON file."""
        with open(self.workloads_json, "w") as f:
            json.dump(self.workloads_data, f, indent=4)
        logging.info(f"Successfully saved workload to {self.workloads_json}")

    def ensure_working_path_exists(self) -> None:
        """Create the workdir directory if it does not exist yet."""
        os.makedirs(self.workdir_path, exist_ok=True)

    def map_to_response(self) -> dict:
        """Map the registry to the get_workloads_list response payload."""
        data: Dict[str, Any] = {
            "workspace_path": self.get_active_workspace(),
            # dict preserves insertion order, so this matches iterating keys.
            "workloads_list": list(self.workloads_data["workloads"].values()),
        }
        return data

    def update_data(
        self,
        request_id: str,
        project_name: Optional[str] = None,
        model_path: Optional[str] = None,
        input_precision: Optional[str] = None,
        model_output_path: Optional[str] = None,
        output_precision: Optional[str] = None,
        mode: Optional[str] = None,
        metric: Optional[Union[Dict[str, Any], Metric]] = None,
        status: Optional[str] = None,
        execution_details: Optional[Dict[str, Any]] = None,
        created_at: Optional[str] = None,
        supports_profiling: Optional[bool] = None,
    ) -> None:
        """Create or replace the registry entry for *request_id*."""
        if metric is None:
            # Was a mutable default argument (`= Metric()`): one shared
            # instance could leak state between calls. Build a fresh one.
            metric = Metric()
        self.load()
        existing_data = self.get_workload_data(request_id)
        # Preserve fields that callers usually omit on subsequent updates.
        if project_name is None:
            project_name = existing_data.get("project_name", None)
        if created_at is None:
            created_at = existing_data.get("created_at", None)
        if supports_profiling is None:
            supports_profiling = existing_data.get("supports_profiling", None)
        workload_info = WorkloadInfo(
            workload_path=self.workload_path,
            request_id=request_id,
            project_name=project_name,
            model_path=model_path,
            input_precision=input_precision,
            model_output_path=model_output_path,
            output_precision=output_precision,
            mode=mode,
            metric=metric,
            status=status,
            code_template_path=self.template_path,
            execution_details=execution_details,
            created_at=created_at,
            supports_profiling=supports_profiling,
        ).serialize()
        self.workloads_data["workloads"][request_id] = workload_info
        self.dump()

    def set_workload_status(self, request_id: str, status: str) -> None:
        """Set status of a given Workload (no-op for unknown ids)."""
        self.load()
        existing_data = self.get_workload_data(request_id)
        if not existing_data:
            return
        self.workloads_data["workloads"][request_id]["status"] = status
        self.dump()

    def update_metrics(
        self,
        request_id: Optional[str],
        metric_data: Dict[str, Any],
    ) -> None:
        """Update metric data in workload.

        NOTE(review): raises KeyError when *request_id* is unknown
        (pre-existing behaviour, kept as-is).
        """
        self.load()
        self.workloads_data["workloads"][request_id].get("metric", {}).update(
            metric_data,
        )
        # Metrics are mirrored at the top level of the workload entry too.
        self.workloads_data["workloads"][request_id].update(metric_data)
        self.dump()

    def update_execution_details(
        self,
        request_id: Optional[str],
        execution_details: Dict[str, Any],
    ) -> None:
        """Update execution details in workload (raises KeyError if unknown)."""
        self.load()
        self.workloads_data["workloads"][request_id].get(
            "execution_details",
            {},
        ).update(execution_details)
        self.dump()

    def get_active_workspace(self) -> str:
        """Get active workspace (falls back to the default workspace path)."""
        path = self.workloads_data.get("active_workspace_path", self.workspace_path)
        return path

    def set_active_workspace(self, workspace_path: str) -> None:
        """Set active workspace and persist the registry."""
        self.workloads_data["active_workspace_path"] = workspace_path
        self.dump()

    def set_code_template_path(self, code_template_path: str) -> None:
        """Set code_template_path of the current workload and persist."""
        self.workloads_data["workloads"][self.request_id][
            "code_template_path"
        ] = code_template_path
        self.dump()

    def clean_logs(self) -> None:
        """Remove output and intermediate log files of the current workload."""
        log_files = [os.path.join(self.workload_path, "output.txt")]
        log_files.extend(
            glob.glob(
                os.path.join(self.workload_path, "*.proc"),
            ),
        )
        log_files.extend(
            glob.glob(
                os.path.join(self.workload_path, "*performance_benchmark.txt"),
            ),
        )
        for file in log_files:
            if os.path.exists(file):
                os.remove(file)

    def clean_status(
        self,
        status_to_clean: Optional[str] = None,
        requests_id: Optional[List[str]] = None,
    ) -> None:
        """Clean status for workloads according to passed parameters.

        Args:
            status_to_clean: when given, only entries currently in this
                status are cleared; otherwise every status is cleared.
            requests_id: when given, restrict cleaning to these workload ids.
        """
        # `requests_id` previously defaulted to a mutable `[]`; None behaves
        # identically in the falsy check below and cannot be shared or mutated
        # across calls.
        for workload_id, workload_params in self.workloads_data["workloads"].items():
            if requests_id and workload_id not in requests_id:
                continue
            self._clean_workload_status(
                workload_params,
                status_to_clean,
            )
        self.dump()

    @staticmethod
    def _clean_workload_status(workload: dict, status: Optional[str]) -> None:
        """Reset workload's status to "" (only when it matches *status*, if given)."""
        workload_status = workload.get("status")
        if status and workload_status != status:
            return
        workload["status"] = ""

    @property
    def template_path(self) -> Optional[str]:
        """Get code_template_path of the current workload, if recorded."""
        return self.get_workload_data(self.request_id).get("code_template_path", None)

    def get_workload_data(self, request_id: Optional[str]) -> dict:
        """Return data of given Workload ({} when unknown)."""
        return self.workloads_data.get("workloads", {}).get(request_id, {})

    def get_workload_object(self) -> Workload:
        """Build a Workload object from the workload.json stored on disk."""
        workload_path = self.workload_path
        workload_data = _load_json_as_dict(
            os.path.join(workload_path, "workload.json"),
        )
        return Workload(workload_data)
| StarcoderdataPython |
from gpiozero import Buzzer
from time import sleep

# Buzzer wired to GPIO 15 (BCM numbering).
buzzer = Buzzer(15)

while True:
    # buzzer.beep()   # alternative: let gpiozero pulse the buzzer itself
    # sleep(1)
    # Bug fix: `buzzer.off` only referenced the method without calling it
    # (a no-op expression) -- it must be invoked.
    buzzer.off()
    # Sleep so the loop does not busy-spin at 100% CPU.
    sleep(1)
4923709 | <reponame>golyshevskii/yamitoys
import os
from celery import Celery
from django.conf import settings
# DJANGO_SETTINGS_MODULE must be set before Celery reads the Django settings.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pupa.settings')
# Celery application instance for the project.
# broker_pool_limit=1 caps the broker connection pool at a single connection.
app = Celery('pupa', broker_pool_limit=1)
# Load any custom configuration from the Django settings module.
app.config_from_object('django.conf:settings')
# Auto-discover tasks.py modules in every app listed in INSTALLED_APPS.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| StarcoderdataPython |
5090028 | _bytes = b"x" * 3
def test_lrusized_acts_like_a_dict():
    """The LRU exposes the mapping protocol and tracks memory per entry."""
    from guillotina.contrib.cache.lru import LRU
    m = LRU(1024)
    # set(key, value, size) records the entry's memory footprint (3 bytes here).
    m.set("a", _bytes, 3)
    assert m["a"] == _bytes
    assert "a" in m
    assert m.get("a") == _bytes
    assert m.get_memory() == 3
    # Deleting the entry releases its accounted memory.
    del m["a"]
    assert len(m.keys()) == 0
    assert m.get_memory() == 0
def test_clean_till_it_has_enought_space():
    """Inserting past capacity evicts least-recently-used entries first."""
    from guillotina.contrib.cache.lru import LRU
    m = LRU(19)
    # 20 one-byte entries into a 19-byte cache: the oldest keys get evicted.
    for k in range(20):
        m.set(k, k, 1)
    m.set("a", 1, 1)
    assert 1 not in m.keys()
    # Reading key 2 marks it recently used, so the next insert evicts 3 instead.
    r = m[2]
    assert r == 2
    m.set("b", 1, 1)
    assert 2 in m.keys()
    assert 3 not in m.keys()
    # Growing "b" to 10 bytes forces eviction until the 19-byte budget fits.
    m.set("b", 1, 10)
    assert len(m.keys()) == 10
    assert 2 in m.keys()
    assert m.get_memory() == 19
    del m["b"]
    assert m.get_memory() == 9
    assert len(m.keys()) == 9
    # we should cleanup 12 keys; get_stats() is (hits, misses, cleanups)
    assert m.get_stats() == (1, 0, 12)
def test_setting_a_bigger_value_than_cache_doesnt_brake():
    """An entry larger than the whole cache budget is silently rejected."""
    from guillotina.contrib.cache.lru import LRU
    cache = LRU(1)
    cache.set("a", "v", 100)
    assert "a" not in cache.keys()
def test_cache_stats_are_hit():
    """get_stats() returns (hits, misses, cleanups) counters."""
    from guillotina.contrib.cache.lru import LRU
    m = LRU(1)
    # A failed lookup counts as a miss.
    try:
        m["a"]
    except KeyError:
        pass
    assert m.get_stats() == (0, 1, 0)
    # A successful lookup counts as a hit.
    m.set("a", 1, 1)
    assert m["a"] == 1
    assert m.get_stats() == (1, 1, 0)
def test_cache_clear_resets_memory():
    """clear() drops every entry and resets the memory accounting."""
    from guillotina.contrib.cache.lru import LRU
    cache = LRU(2)
    cache.set("a", 1, 1)
    assert cache.get_memory() == 1
    cache.clear()
    assert cache.get_memory() == 0
    assert "a" not in cache.keys()
| StarcoderdataPython |
6444871 | <gh_stars>1-10
# A sample application (based on a published sample for the CV2 library) for
# determining the color range needed in order to isolate a specific target
# in an image. The program will default to reading from a connected webcam,
# but can also be used with a sample (still) image.
#
# This was used in 2021 to figure out what color range should be used to
# "see" the yellow balls used in the FRC game "Infinite Recharge", allowing
# the vision-processing code on our robots to be tuned appropriately.
from __future__ import print_function
import argparse
import cv2
import math
import numpy as np
# Value ranges for the yellow balls in the lab (rough):
#   H: 11 / 40 S: 118 / 214 V: 94 / 208
# Note: openCV uses a constrained range of 0..179 for H, and 0..255 for S/V
# So we need to normalize accordingly.
max_value = 255
max_value_H = 360//2
# Current HSV threshold window (previous experiments kept as inline comments).
low_H = 55 # 38 # 0
high_H = 75 # max_value_H
low_S = 20 # 0
high_S = 102 # 86 # max_value
low_V = 162 # 212 # 0
high_V = 255 # max_value
# H: 55 / 75 S: 20 / 102 V: 162 / 255
# Window and trackbar labels reused throughout the UI setup below.
window_capture_name = 'Video Capture'
window_detection_name = 'Object Detection'
window_masking_name = 'Masked Image'
low_H_name = 'Low H'
low_S_name = 'Low S'
low_V_name = 'Low V'
high_H_name = 'High H'
high_S_name = 'High S'
high_V_name = 'High V'
def reportStats():
    """Print the current low/high HSV threshold bounds to stdout."""
    print(f"H: {low_H} / {high_H} S: {low_S} / {high_S} V: {low_V} / {high_V}")
# --- Trackbar callbacks ------------------------------------------------------
# Each callback updates one global HSV bound, clamps it so low < high always
# holds, pushes the clamped value back to the trackbar UI, and prints the range.
def on_low_H_thresh_trackbar(val):
    """Handle movement of the 'Low H' trackbar."""
    global low_H
    global high_H
    low_H = val
    # Keep the low bound strictly below the high bound.
    low_H = min(high_H-1, low_H)
    cv2.setTrackbarPos(low_H_name, window_detection_name, low_H)
    reportStats()
def on_high_H_thresh_trackbar(val):
    """Handle movement of the 'High H' trackbar."""
    global low_H
    global high_H
    high_H = val
    high_H = max(high_H, low_H+1)
    cv2.setTrackbarPos(high_H_name, window_detection_name, high_H)
    reportStats()
def on_low_S_thresh_trackbar(val):
    """Handle movement of the 'Low S' trackbar."""
    global low_S
    global high_S
    low_S = val
    low_S = min(high_S-1, low_S)
    cv2.setTrackbarPos(low_S_name, window_detection_name, low_S)
    reportStats()
def on_high_S_thresh_trackbar(val):
    """Handle movement of the 'High S' trackbar."""
    global low_S
    global high_S
    high_S = val
    high_S = max(high_S, low_S+1)
    cv2.setTrackbarPos(high_S_name, window_detection_name, high_S)
    reportStats()
def on_low_V_thresh_trackbar(val):
    """Handle movement of the 'Low V' trackbar."""
    global low_V
    global high_V
    low_V = val
    low_V = min(high_V-1, low_V)
    cv2.setTrackbarPos(low_V_name, window_detection_name, low_V)
    reportStats()
def on_high_V_thresh_trackbar(val):
    """Handle movement of the 'High V' trackbar."""
    global low_V
    global high_V
    high_V = val
    high_V = max(high_V, low_V+1)
    cv2.setTrackbarPos(high_V_name, window_detection_name, high_V)
    reportStats()
# Converts a ratio with ideal value of 1 to a score (range: [0.0 - 100.0]).
def ratioToScore(ratio):
    """Map a ratio whose ideal value is 1.0 onto a clamped score in [0, 100]."""
    deviation = abs(1 - ratio)
    raw = 100 * (1 - deviation)
    return max(0.0, min(raw, 100.0))
# The height and width of the bounding box should be roughly the same
# (since the target is a sphere): score how close h/w is to 1.
def boundingRatioScore(bounds):
    """Score the squareness of an (x, y, w, h) bounding box."""
    _, _, width, height = bounds
    return ratioToScore(float(height) / float(width))
# Ideally, the ratio of the area of the target to that of its bounding box
# should be approximately pi / 4 (circle inscribed in a square:
# pi*r*r vs 4*r*r).
def coverageAreaScore(contour, bounds):
    """Score how closely the contour fills its bounding box like a circle."""
    _, _, width, height = bounds
    box_area = float(width) * float(height)
    return ratioToScore(cv2.contourArea(contour) / box_area / (math.pi / 4))
# A trivial additional score: small bonus for convex contours
# (a ball silhouette should be convex).
def convexityScore(contour):
    """Return 1 when the contour is convex, 0 otherwise."""
    return 1 if cv2.isContourConvex(contour) else 0
#############################################
# Main execution starts here
useCamera = True
parser = argparse.ArgumentParser(description='Code for Thresholding Operations using inRange tutorial.')
parser.add_argument('--camera', help='Camera divide number.', default=0, type=int)
parser.add_argument('--file', help='Still image file to be used.', default="")
# Seed the default values for the HSV ranges, based on the numbers set above.
parser.add_argument('--low_H', help='Initial low value for hue.', default=str(low_H))
parser.add_argument('--low_S', help='Initial low value for saturation.', default=str(low_S))
parser.add_argument('--low_V', help='Initial low value for value.', default=str(low_V))
parser.add_argument('--high_H', help='Initial high value for hue.', default=str(high_H))
parser.add_argument('--high_S', help='Initial high value for saturation.', default=str(high_S))
parser.add_argument('--high_V', help='Initial high value for value.', default=str(high_V))
args = parser.parse_args()
# Source selection: webcam by default, still image when --file is given.
if args.file == "":
    useCamera = True
    cap = cv2.VideoCapture(args.camera)
    print("Using data from camera")
else:
    useCamera = False
    still_image = cv2.imread(args.file)
    print("Using data from file '{}'".format(args.file))
# Convert the program arguments for HSV values (default or otherwise) into the
# values to be applied @ program start.
low_H = int(args.low_H)
high_H = int(args.high_H)
low_S = int(args.low_S)
high_S = int(args.high_S)
low_V = int(args.low_V)
high_V = int(args.high_V)
# Set up the windows and controls for editing the color range (and seeing results).
cv2.namedWindow(window_capture_name)
cv2.namedWindow(window_detection_name)
cv2.namedWindow(window_masking_name)
cv2.createTrackbar(low_H_name, window_detection_name , low_H, max_value_H, on_low_H_thresh_trackbar)
cv2.createTrackbar(high_H_name, window_detection_name , high_H, max_value_H, on_high_H_thresh_trackbar)
cv2.createTrackbar(low_S_name, window_detection_name , low_S, max_value, on_low_S_thresh_trackbar)
cv2.createTrackbar(high_S_name, window_detection_name , high_S, max_value, on_high_S_thresh_trackbar)
cv2.createTrackbar(low_V_name, window_detection_name , low_V, max_value, on_low_V_thresh_trackbar)
cv2.createTrackbar(high_V_name, window_detection_name , high_V, max_value, on_high_V_thresh_trackbar)
# Used for morphological ops below
kernel = np.ones((3, 3), np.uint8)
# Main loop: grab a frame, threshold it in HSV space, score candidate
# contours, and display the annotated / thresholded / masked views.
while True:
    if useCamera:
        ret, frame = cap.read()
        if frame is None:
            break
    else:
        frame = still_image.copy()
    frame_HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    frame_threshold = cv2.inRange(frame_HSV, (low_H, low_S, low_V), (high_H, high_S, high_V))
    # Perform erosion, to remove noise from the background
    # frame_threshold = cv2.erode(frame_threshold, kernel, iterations = 1)
    # Perform dilation, to remove small holes inside a larger region
    # frame_threshold = cv2.dilate(frame_threshold, kernel, iterations = 1)
    # "Opening" is erosion, followed by dilation
    frame_threshold = cv2.morphologyEx(frame_threshold, cv2.MORPH_OPEN, kernel, iterations = 2)
    # "Closing" is dilation, followed by erosion
    # frame_threshold = cv2.morphologyEx(frame_threshold, cv2.MORPH_CLOSE, kernel)
    # Find the contours of the possible targets
    contours, _ = cv2.findContours(frame_threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    scores = []
    # print("Num contours:", len(contours))
    if len(contours) > 0:
        # Generate "fitness scores" for each of the possible targets.
        # I should really also be figuring out the "roundness" of it, which
        # might be approximated by checking how close the centroid is to the
        # center of the bounding area.
        # Other possible useful data can be seen at:
        # https://docs.opencv.org/master/dd/d49/tutorial_py_contour_features.html
        for contour in contours:
            # Ignore tiny contours (< 60 px^2) as noise.
            if cv2.contourArea(contour) >= 60:
                boundingBox = cv2.boundingRect(contour)
                scores.append(boundingRatioScore(boundingBox)
                    + coverageAreaScore(contour, boundingBox)
                    + convexityScore(contour)
                )
            else:
                scores.append(0.0)
        # The following code assumes that there's only 1 ball, and thus
        # we're simply looking for the best match. In a real game, we
        # could have multiple balls, and might want to factor in "which
        # one is closest (i.e., biggest)".
        bestIndex = 0
        for index in range(len(contours)):
            if scores[index] > scores[bestIndex]:
                bestIndex = index
        best = contours[bestIndex]
        # All candidates in green (thin), best candidate in red (thick).
        cv2.drawContours(frame, contours, -1, (0,255,0), 1)
        cv2.drawContours(frame, contours, bestIndex, (0,0,255), 3)
        center, radius = cv2.minEnclosingCircle(best)
        cv2.circle(frame, (int(center[0]), int(center[1])), 2, (255,255,255), 1)
    masked_img = cv2.bitwise_and(frame, frame, mask=frame_threshold)
    cv2.imshow(window_capture_name, frame)
    cv2.imshow(window_detection_name, frame_threshold)
    cv2.imshow(window_masking_name, masked_img)
    # Quit on 'q' or ESC (27).
    key = cv2.waitKey(30)
    if key == ord('q') or key == 27:
        break
| StarcoderdataPython |
6518897 | <filename>layers/src/function1/index.py
def lambda_handler(event, context):
    """AWS Lambda entry point: log an invocation marker and return a greeting.

    Args:
        event: the triggering event payload (unused).
        context: the Lambda runtime context object (unused).

    Returns:
        The constant string "hello world".
    """
    print("Hello data service function1")
    # Bug fix: dataset junk fused onto the return line made it a syntax error.
    return "hello world"
9770425 | from unittest import TestCase, main
from dominio import Lance, Leilao, Usuario
from excecoes import LanceInvalido
class TestLeilao(TestCase):
    """Unit tests for the Leilao (auction) domain object and its bid rules."""

    def setUp(self):
        # Two users with a 500 wallet each, their opening bids, and an auction.
        self.gui = Usuario('Gui', 500)
        self.yuri = Usuario('Yuri', 500)
        self.lance_do_gui = Lance(self.gui, 150)
        self.lance_do_yuri = Lance(self.yuri, 100)
        self.leilao = Leilao('Celular')
    def test_deve_retornar_o_maior_e_o_menor_valor_de_um_lance_quando_adicionados_em_ordem_crescente(self):
        """Min/max bid are tracked when bids arrive in increasing order."""
        self.leilao.propoe(self.lance_do_yuri)
        self.leilao.propoe(self.lance_do_gui)
        menor_valor_esperado = 100
        maior_valor_esperado = 150
        self.assertEqual(menor_valor_esperado, self.leilao.menor_lance)
        self.assertEqual(maior_valor_esperado, self.leilao.maior_lance)
    def test_nao_deve_permitir_propor_um_lance_em_ordem_decrescente(self):
        """A bid lower than the current highest must raise LanceInvalido."""
        with self.assertRaises(LanceInvalido):
            self.leilao.propoe(self.lance_do_gui)
            self.leilao.propoe(self.lance_do_yuri)
    def test_deve_retornar_o_mesmo_valor_para_o_maior_e_menor_lance_quando_leilao_tiver_um_lance(self):
        """With a single bid, min and max bid are the same value."""
        self.leilao.propoe(self.lance_do_gui)
        menor_valor_esperado = 150
        maior_valor_esperado = 150
        self.assertEqual(menor_valor_esperado, self.leilao.menor_lance)
        self.assertEqual(maior_valor_esperado, self.leilao.maior_lance)
    def test_deve_retornar_o_maior_e_o_menor_valor_quando_o_leilao_tiver_tres_lances(self):
        """Min/max bid are tracked correctly across three bids."""
        self.vini = Usuario('Vini', 500)
        self.lance_do_vini = Lance(self.vini, 200.0)
        self.leilao.propoe(self.lance_do_yuri)
        self.leilao.propoe(self.lance_do_gui)
        self.leilao.propoe(self.lance_do_vini)
        menor_valor_esperado = 100.0
        maior_valor_esperado = 200.0
        self.assertEqual(menor_valor_esperado, self.leilao.menor_lance)
        self.assertEqual(maior_valor_esperado, self.leilao.maior_lance)
    def test_deve_permitir_propor_um_lance_caso_o_leilao_nao_tenha_lances(self):
        """The first bid on an empty auction is always accepted."""
        self.leilao.propoe(self.lance_do_yuri)
        self.assertEqual([self.lance_do_yuri], self.leilao.lances)
    def test_deve_permitir_propor_um_lance_caso_o_ultimo_usuario_seja_diferente(self):
        """A bid from a different user than the last bidder is accepted."""
        self.leilao.propoe(self.lance_do_yuri)
        self.leilao.propoe(self.lance_do_gui)
        self.assertEqual([self.lance_do_yuri, self.lance_do_gui], self.leilao.lances)
    def test_nao_deve_permitir_propor_lance_caso_o_usuario_seja_o_mesmo(self):
        """Two consecutive bids by the same user must raise LanceInvalido."""
        lance_do_yury300 = Lance(self.yuri, 300)
        with self.assertRaises(LanceInvalido):
            self.leilao.propoe(self.lance_do_yuri)
            self.leilao.propoe(lance_do_yury300)
if __name__ == '__main__':
main() | StarcoderdataPython |
8130488 | import os
import pytest
def test_been_called(expect, mocker):
    """been_called matches patched callables with >= 1 invocation."""
    mock_called = mocker.patch('os.path.join')
    os.path.join('home', 'log.txt')
    expect(mock_called).to.have.been_called
    with pytest.raises(AssertionError):
        expect(mock_called).to.have_not.been_called
    mock_not_called = mocker.patch('os.rmdir')
    expect(mock_not_called).to.have_not.been_called
    with pytest.raises(AssertionError):
        expect(mock_not_called).to.have.been_called
    # Multiple invocations still satisfy been_called.
    mock_called_several_times = mocker.patch('os.rename')
    os.rename('/home/log.txt', '/home/log_new.txt')
    os.rename('/home/log.txt', '/home/log_new.txt')
    expect(mock_called_several_times).to.have.been_called
    with pytest.raises(AssertionError):
        expect(mock_called_several_times).to.have_not.been_called
def test_been_called_times(expect, mocker):
    """been_called_times matches the exact invocation count (including 0)."""
    mock_called = mocker.patch('os.path.join')
    os.path.join('home', 'log.txt')
    os.path.join('home', 'log.txt')
    os.path.join('home', 'log.txt')
    expect(mock_called).to.have.been_called_times(3)
    with pytest.raises(AssertionError):
        expect(mock_called).to.have_not.been_called_times(3)
    mock_not_called = mocker.patch('os.rmdir')
    expect(mock_not_called).to.have.been_called_times(0)
    expect(mock_not_called).to.have_not.been_called_times(3)
    with pytest.raises(AssertionError):
        expect(mock_not_called).to.have.been_called_times(3)
def test_been_called_with(expect, mocker):
    """been_called_with matches the arguments of any recorded call."""
    mock_called = mocker.patch('os.path.join')
    os.path.join('home', 'log.txt')
    expect(mock_called).to.have.been_called_with('home', 'log.txt')
    with pytest.raises(AssertionError):
        expect(mock_called).to.have_not.been_called_with('home', 'log.txt')
    mock_not_called = mocker.patch('os.rmdir')
    expect(mock_not_called).to.have_not.been_called_with('home', 'log.txt')
    with pytest.raises(AssertionError):
        expect(mock_not_called).to.have.been_called_with('home', 'log.txt')
def test_been_called_once(expect, mocker):
    """been_called_once matches exactly one invocation (0 or 2+ fail)."""
    mock_called = mocker.patch('os.path.join')
    os.path.join('home', 'log.txt')
    expect(mock_called).to.have.been_called_once
    with pytest.raises(AssertionError):
        expect(mock_called).to.have_not.been_called_once
    mock_not_called = mocker.patch('os.rmdir')
    expect(mock_not_called).to.have_not.been_called_once
    with pytest.raises(AssertionError):
        expect(mock_not_called).to.have.been_called_once
    # Two calls: been_called_once must no longer match.
    mock_called_several_times = mocker.patch('os.rename')
    os.rename('/home/log.txt', '/home/log_new.txt')
    os.rename('/home/log.txt', '/home/log_new.txt')
    expect(mock_called_several_times).to.have_not.been_called_once
    with pytest.raises(AssertionError):
        expect(mock_called_several_times).to.have.been_called_once
def test_been_called_once_with(expect, mocker):
    """been_called_once_with matches a single invocation with given args."""
    mock_called = mocker.patch('os.path.join')
    os.path.join('home', 'log.txt')
    expect(mock_called).to.have.been_called_once_with('home', 'log.txt')
    with pytest.raises(AssertionError):
        # NOTE(review): uses .to_not here while the rest of the file uses
        # .to.have_not -- confirm both negation forms are equivalent.
        expect(mock_called).to_not.been_called_once_with('home', 'log.txt')
    mock_not_called = mocker.patch('os.rmdir')
    expect(mock_not_called).to.have_not.been_called_once_with('/home/log.txt')
    with pytest.raises(AssertionError):
        expect(mock_not_called).to.have.been_called_once_with('/home/log.txt')
    mock_called_several_times = mocker.patch('os.rename')
    os.rename('/home/log.txt', '/home/log_new.txt')
    os.rename('/home/log.txt', '/home/log_new.txt')
    expect(mock_called_several_times).to.have_not.been_called_once
    with pytest.raises(AssertionError):
        expect(mock_called_several_times).to.have.been_called_once
def test_been_called_with_a_spy(expect, mocker):
    """All been_called* matchers also work against mocker.spy wrappers."""
    class Foo():
        def bar(self, string, padding):
            return string.zfill(padding)
    foo = Foo()
    spy = mocker.spy(foo, 'bar')
    # The spy is transparent: the real method still runs.
    expect(foo.bar('foo', 5)).to.be('00foo')
    expect(spy).to.have.been_called
    expect(spy).to.have.been_called_once
    expect(spy).to.have.been_called_times(1)
    expect(spy).to.have.been_called_with('foo', 5)
    expect(spy).to.have.been_called_once_with('foo', 5)
    with pytest.raises(AssertionError):
        expect(spy).to.have_not.been_called
    with pytest.raises(AssertionError):
        expect(spy).to.have_not.been_called_with('foo', 5)
    with pytest.raises(AssertionError):
        expect(spy).to.have_not.been_called_once
    with pytest.raises(AssertionError):
        expect(spy).to.have_not.been_called_times(1)
    with pytest.raises(AssertionError):
        expect(spy).to.have_not.been_called_once_with('foo', 5)
def test_been_called_with_a_stub(expect, mocker):
    """All been_called* matchers also work against mocker.stub objects."""
    def foo(bar):
        bar('test')
    stub = mocker.stub('bar_stub')
    foo(stub)
    expect(stub).to.have.been_called
    expect(stub).to.have.been_called_once
    expect(stub).to.have.been_called_times(1)
    expect(stub).to.have.been_called_with('test')
    expect(stub).to.have.been_called_once_with('test')
    with pytest.raises(AssertionError):
        expect(stub).to.have_not.been_called
    with pytest.raises(AssertionError):
        expect(stub).to.have_not.been_called_with('test')
    with pytest.raises(AssertionError):
        expect(stub).to.have_not.been_called_once
    with pytest.raises(AssertionError):
        expect(stub).to.have_not.been_called_times(1)
    with pytest.raises(AssertionError):
        expect(stub).to.have_not.been_called_once_with('test')
def test_been_called_with_an_incompatible_object(expect, mocker):
    """been_called* matchers reject plain (non-mock) callables."""
    def foo():
        pass
    foo()
    with pytest.raises(AssertionError):
        expect(foo).to.have.been_called
    with pytest.raises(AssertionError):
        expect(foo).to.have.been_called_once
    with pytest.raises(AssertionError):
        expect(foo).to.have.been_called_times(1)
    with pytest.raises(AssertionError):
        expect(foo).to.have.been_called_with('something')
    with pytest.raises(AssertionError):
        expect(foo).to.have.been_called_once_with('something')
| StarcoderdataPython |
9632991 | <gh_stars>1-10
import tensorflow as tf
import numpy as np
def remove_test_time_dropout(model_drop):
    """Return a copy of a Keras model with test-time (MC) dropout disabled.

    Edits the functional-API config so that every Dropout layer's call
    kwargs carry ``training=False`` (making dropout inactive at inference),
    rebuilds the model from that config, and copies the original weights.
    """
    config = model_drop.get_config()
    for layer in config['layers']:
        if 'Dropout' in layer['class_name']:
            # NOTE(review): assumes the functional-API inbound_nodes layout
            # [[[name, node_idx, tensor_idx, kwargs]]]; verify this holds for
            # the TensorFlow version in use.
            layer['inbound_nodes'][0][0][3]['training'] = False
    model = tf.keras.Model().from_config(config)
    model.set_weights(model_drop.get_weights())
    return model
def ds2numpy(ds, max_num_batches):
    """Materialize at most *max_num_batches* batches of a (features, labels)
    dataset into two NumPy arrays stacked along the batch axis."""
    feature_parts, label_parts = [], []
    for batch in ds.take(max_num_batches):
        feature_parts.append(batch[0].numpy())
        label_parts.append(batch[1].numpy())
    return np.concatenate(feature_parts, axis=0), np.concatenate(label_parts, axis=0)
| StarcoderdataPython |
# Esconde senha
# Faça uma função que recebe uma senha (string) e devolve uma string do mesmo
# tamanho da senha formada somente por asteriscos ('*').
# O nome da sua função deve ser esconde_senha.
def esconde_senha (password):
    """Return a mask of '*' characters with the same length as *password*."""
    return '*' * len(password)
| StarcoderdataPython |
3318998 | import os, re
import numpy as np
import matplotlib.pyplot as plt
def rename_files(path):
    """Zero-pad the numeric extension of every file in *path* to 3 digits.

    A file named ``name.7`` becomes ``name.007``.  Files whose extension is
    not a plain (optionally signed) integer, or that have no dot, are left
    untouched (the original crashed on names without exactly one dot).

    Args:
        path: directory to scan (with or without trailing separator).
    """
    for filename in os.listdir(path):
        name, sep, number = filename.partition('.')
        if not sep:
            continue  # no extension at all
        # Only rename when the extension is an integer; avoids the original's
        # pointless self-rename for non-numeric extensions.
        if re.fullmatch(r'[-+]?[0-9]+', number):
            new_filename = '%s.%03d' % (name, int(number))
            # os.path.join works whether or not *path* has a trailing slash
            # (the original relied on string concatenation).
            os.rename(os.path.join(path, filename), os.path.join(path, new_filename))
def buildsSectionByAxis(images, section, axis = None):
    """Extract a 2-D cross-section from a 3-D stack of images.

    axis None/falsy -> the whole slice ``images[section]``;
    axis 'y'        -> row *section* taken from every image;
    axis 'z'        -> column *section* taken from every row of every image.
    Any other truthy axis yields an empty list (original behaviour).
    """
    if not axis:
        return images[section]
    if axis == 'y':
        return [image[section] for image in images]
    if axis == 'z':
        return [[row[section] for row in image] for image in images]
    return []
def retrieveEachImage(root, files, images):
    """Load each raw slice file in *files* (under *root*) and append to *images*.

    Assumes every file is a raw little-endian int16 buffer of exactly
    512x512 pixels -- TODO confirm against the scanner export format.
    Mutates *images* in place.
    """
    resolution = [512, 512]
    for file in files:
        image = np.fromfile(os.path.join(root, file), dtype='int16', sep='')
        images.append(image.reshape(resolution))
def getImages(path, images):
    """Walk *path* recursively, appending every slice to *images* (in place).

    Returns the same *images* list for convenience.
    """
    for root, _directories, filenames in os.walk(path):
        # Sort in place so slices are appended in deterministic filename order.
        filenames.sort()
        retrieveEachImage(root, filenames, images)
    return images
def show_images(images):
    """Display each image, one at a time, as a grayscale matplotlib figure.

    Blocks on each plt.show() until the window is closed.
    """
    # Iterate directly instead of the original `for i in range(len(images))`.
    for image in images:
        plt.imshow(image, cmap='gray')
        plt.show()
def main():
    """Build three orthogonal cross-sections at a fixed frame and display them.

    NOTE(review): sectionX/Y/Z presumably correspond to the axial/coronal/
    sagittal planes of the CT stack -- confirm the anatomical mapping.
    """
    # rename_files('/home/jhonatan/Desktop/multiplanar-reconstruction/Arterielle/')
    frame = 350
    images = getImages('./Arterielle', [])
    sectionX = buildsSectionByAxis(images, frame)
    sectionY = buildsSectionByAxis(images, frame, 'y')
    sectionZ = buildsSectionByAxis(images, frame, 'z')
    show_images([sectionX, sectionY, sectionZ])
if __name__ == '__main__':
    main()
| StarcoderdataPython |
9651791 | ## Imports
import os
import sys
import inspect
import unittest
# Include path to handybeam directory
sys.path.append('../.')
## Class
class TranslatorTests(unittest.TestCase):
    """Smoke tests for handybeam.translator: importability and instantiation."""

    def setUp(self):
        # Fresh (empty) slot for the translator under test.
        self.translator = None
    def tearDown(self):
        del self.translator
    def test_import(self):
        """The handybeam.translator module imports and registers in sys.modules."""
        module_name = 'handybeam.translator'
        import handybeam.translator
        fail = False
        if module_name not in sys.modules:
            fail = True
        self.assertEqual(fail,False)
    def test_instance_creation(self):
        """Translator() constructs an instance of the expected class."""
        import handybeam.translator
        self.translator = handybeam.translator.Translator()
        fail = True
        if isinstance(self.translator, handybeam.translator.Translator):
            fail = False
        self.assertEqual(fail,False)
## Script
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
class player:
    """The player's character: hit points, attack power and flat armor."""

    def __init__(self, health, attackpower, spec, armor, name, race):
        self.health = health            # remaining hit points, floored at 0
        self.attackpower = attackpower  # damage dealt per attack
        self.spec = spec                # specialisation (class) label
        self.armor = armor              # flat damage reduction per hit
        self.name = name
        self.race = race

    def take_damage(self, damage):
        """Apply *damage* reduced by armor; health never drops below 0."""
        effective = damage - self.armor
        if effective > 0:
            self.health = max(0, self.health - effective)

    def get_attackpower(self):
        return self.attackpower

    def get_health(self):
        return self.health

    def get_name(self):
        return self.name
class teammate:
    """A friendly party member; mirrors the player's combat attributes."""

    def __init__(self, health, attackpower, spec, armor, name, race):
        self.health = health            # remaining hit points, floored at 0
        self.attackpower = attackpower  # damage dealt per attack
        self.spec = spec                # specialisation (class) label
        self.armor = armor              # flat damage reduction per hit
        self.name = name
        self.race = race

    def take_damage(self, damage):
        """Apply *damage* reduced by armor; health never drops below 0."""
        penetrating = damage - self.armor
        if penetrating > 0:
            self.health = max(0, self.health - penetrating)

    def get_attackpower(self):
        return self.attackpower

    def get_health(self):
        return self.health

    def get_name(self):
        return self.name
class enemy:
    """A hostile combatant; mirrors the player's combat attributes."""

    def __init__(self, health, attackpower, spec, armor, name, race):
        self.health = health            # remaining hit points, floored at 0
        self.attackpower = attackpower  # damage dealt per attack
        self.spec = spec                # specialisation (class) label
        self.armor = armor              # flat damage reduction per hit
        self.name = name
        self.race = race

    def take_damage(self, damage):
        """Apply *damage* reduced by armor; health never drops below 0."""
        dealt = damage - self.armor
        if dealt > 0:
            self.health = max(0, self.health - dealt)

    def get_attackpower(self):
        return self.attackpower

    def get_health(self):
        return self.health

    def get_name(self):
        return self.name
| StarcoderdataPython |
165063 | """Solr Tests"""
import os
import pytest
from hamcrest import contains_string, assert_that
# pylint: disable=redefined-outer-name
@pytest.fixture()
def get_ansible_vars(host):
    """Collect the role/common/search ansible variables into one merged dict."""
    # Each string is an ansible include_vars argument: "file=<path> name=<fact>".
    java_role = "file=../../../java/vars/main.yml name=java_role"
    common_vars = "file=../../../common/vars/main.yml name=common_vars"
    common_defaults = "file=../../../common/defaults/main.yml name=common_defaults"
    common_hosts = "file=../../../common/defaults/main.yml name=common_hosts"
    search_services = "file=../../vars/main.yml name=search_services"
    ansible_vars = host.ansible("include_vars", java_role)["ansible_facts"]["java_role"]
    # NOTE(review): java_role is loaded twice (the first update is redundant),
    # and common_hosts points at the *defaults* file rather than a hosts file
    # -- confirm both are intentional.
    ansible_vars.update(host.ansible("include_vars", java_role)["ansible_facts"]["java_role"])
    ansible_vars.update(host.ansible("include_vars", common_vars)["ansible_facts"]["common_vars"])
    ansible_vars.update(host.ansible("include_vars", common_hosts)["ansible_facts"]["common_hosts"])
    ansible_vars.update(host.ansible("include_vars", common_defaults)["ansible_facts"]["common_defaults"])
    ansible_vars.update(host.ansible("include_vars", search_services)["ansible_facts"]["search_services"])
    return ansible_vars
test_host = os.environ.get('TEST_HOST')
def test_solr_log_exists(host, get_ansible_vars):
"Check that solr log"
assert_that(host.file("{}/solr.log".format(get_ansible_vars["logs_folder"])).exists, get_ansible_vars["logs_folder"])
@pytest.mark.parametrize("svc", ["alfresco-search"])
def test_search_service_running_and_enabled(host, svc):
"""Check alfresco-search service"""
alfresco_search = host.service(svc)
assert_that(alfresco_search.is_running)
assert_that(alfresco_search.is_enabled)
def test_solr_stats_is_accesible(host, get_ansible_vars):
"""Check that SOLR creates the alfresco and archive cores"""
alfresco_core_command = host.run("curl -iL http://{}:8983/solr/#/~cores/alfresco".format(test_host))
archive_core_command = host.run("curl -iL http://{}:8983/solr/#/~cores/archive".format(test_host))
assert_that(alfresco_core_command.stdout, contains_string("HTTP/1.1 200"))
assert_that(archive_core_command.stdout, contains_string("HTTP/1.1 200"))
def test_environment_jvm_opts(host, get_ansible_vars):
    """Check that overwritten JVM_OPTS are taken into consideration."""
    # Locate the Jetty (start.jar) process id with jps from the bundled JDK.
    pid = host.run("/opt/openjdk*/bin/jps -lV | grep start.jar | awk '{print $1}'")
    # jhsdb jmap reports the live heap configuration for that pid.
    # NOTE(review): pid.stdout likely carries a trailing newline; the shell
    # appears to tolerate it -- confirm on the target image.
    process_map = host.run("/opt/openjdk*/bin/jhsdb jmap --heap --pid {}".format(pid.stdout))
    # 943718400 bytes == 900 MB, i.e. the overridden -Xmx took effect.
    assert_that(process_map.stdout, contains_string("MaxHeapSize = 943718400 (900.0MB)"))
| StarcoderdataPython |
1828551 | <reponame>marykamau2/Blog<filename>tests/test_comment.py
from app.models import Comment,User,Blog
from app import db
import unittest
class CommentModelTest(unittest.TestCase):
    """Unit tests for the Comment model and its user/blog relationships."""

    def setUp(self):
        """Create a user, a blog and a comment wired together for each test."""
        self.user_Peris = User(username='Peris', password='<PASSWORD>', email='<EMAIL>')
        self.new_blog = Blog(id=1, title_blog='Test', blog_content='This is a Blog test',
                             category="interview", user=self.user_Peris)
        self.new_comment = Comment(id=1, comment='Test comment', user=self.user_Peris,
                                   blog=self.new_blog)

    def tearDown(self):
        """Delete all rows created by a test so cases stay independent."""
        User.query.delete()
        Blog.query.delete()
        Comment.query.delete()

    def test_check_instance_variables(self):
        """The comment retains the text, author and blog it was created with."""
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual instead.
        self.assertEqual(self.new_comment.comment, 'Test comment')
        self.assertEqual(self.new_comment.user, self.user_Peris)
        self.assertEqual(self.new_comment.blog, self.new_blog)
# Read a sentence and report how often and where the letter 'A' appears
# (case-insensitive thanks to upper()).
n1 = str(input('digite uma frase: ')).strip().upper()
print('A letra A aparece {} vezes na frase'.format(n1.count('A')))
# Guard the position reports: the original printed position 0 when the
# letter was absent, because find()/rfind() return -1.
if n1.count('A') > 0:
    print('A primeira letra A apareceu na posição {}'.format(n1.find('A')+1))
    # Typo fix in the output message: "aprece" -> "aparece".
    print('A ultima letra A aparece na posição {}'.format(n1.rfind('A')+1))
4969936 | <reponame>IgorBwork/django-libreport<filename>reports/migrations/0003_auto_20171119_1232.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import reports.models
class Migration(migrations.Migration):
    # Makes Report.document optional (null=True, blank=True) and raises the
    # stored path limit to 1024 characters. The upload destination is
    # computed by reports.models.report_upload_to.

    dependencies = [
        ('reports', '0002_auto_20171117_1004'),
    ]

    operations = [
        migrations.AlterField(
            model_name='report',
            name='document',
            field=models.FileField(max_length=1024, null=True, upload_to=reports.models.report_upload_to, blank=True),
        ),
    ]
| StarcoderdataPython |
1978936 | <filename>06-Object_tracking/05_background_subtraction.py
#
import cv2
import numpy as np

# Live webcam background-subtraction demo using the MOG2 algorithm.
# Press 'q' in either window to quit.
cap = cv2.VideoCapture(0)
mog2 = cv2.createBackgroundSubtractorMOG2()
# knn = cv2.createBackgroundSubtractorKNN()
while True:
    ret, frame = cap.read()
    if not ret:
        # Camera unplugged or stream ended. The original ignored `ret` and
        # would crash inside cv2.flip on a None frame.
        break
    frame = cv2.flip(frame, 1)  # mirror horizontally for a natural view
    # apply background subtractor
    mog2_frame = mog2.apply(frame)
    # knn_frame = knn.apply(frame)
    # display the frames
    cv2.imshow("Original footage", frame)
    cv2.imshow("MOG2 Frame", mog2_frame)
    # cv2.imshow("KNN frame", knn_frame)
    k = cv2.waitKey(1)
    if k == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
5148173 | <reponame>Attanon/classic-assist-attanon-lib<gh_stars>0
def Autoloot(obj):
    """
    Set the container the Autoloot agent puts looted items into.

    NOTE(review): this docstring is identical to SetAutolootContainer();
    presumably Autoloot is an alias for it -- confirm against the
    ClassicAssist documentation.

    :param obj: An entity serial in integer or hex format, or an alias string such as "self".
    """
    pass
def Counter(name: str):
    """
    Return the current item count tracked by the named counter agent.

    :param name: Agent entry name.
    :return: Count of the given counter agent.
    """
    pass
def Dress(name: str):
    """
    Dress using the specified dress agent entry.

    NOTE(review): the original docstring ("Returns true if the Dress agent
    is currently dressing or undressing") duplicates Dressing() and looks
    copy-pasted; presumed to equip the items of the named entry -- confirm
    against the ClassicAssist documentation.

    :param name: Agent entry name. (Optional)
    """
    pass
def DressConfig():
    """
    Add all currently equipped items to a temporary dress list that is not
    persisted when the client closes.
    """
    pass
def Dressing():
    """
    Return True if the Dress agent is currently dressing or undressing.
    """
    pass
def Organizer(entryname: str, sourcecontainer, destinationcontainer):
    """
    Set the source and destination containers for the named Organizer entry.

    :param entryname: Agent entry name.
    :param sourcecontainer: An entity serial in integer or hex format, or an alias string such as "self". (Optional)
    :param destinationcontainer: An entity serial in integer or hex format, or an alias string such as "self". (Optional)
    """
    pass
def Organizing():
    """
    Return True if an organizer agent is currently running, False otherwise.
    """
    pass
def SetAutolootContainer(obj):
    """
    Set the container the Autoloot agent puts looted items into.

    :param obj: An entity serial in integer or hex format, or an alias string such as "self".
    """
    pass
def SetOrganizerContainers(entryname: str, sourcecontainer, destinationcontainer):
    """
    Set the source and destination containers for the named Organizer entry.

    :param entryname: Agent entry name.
    :param sourcecontainer: An entity serial in integer or hex format, or an alias string such as "self". (Optional)
    :param destinationcontainer: An entity serial in integer or hex format, or an alias string such as "self". (Optional)
    """
    pass
def SetVendorBuyAutoBuy(listname: str, onoff: str):
    """
    Enable or disable automatic buying for the named vendor-buy list.

    :param listname: List name.
    :param onoff: "on" or "off". (Optional)
    """
    pass
def Undress(name: str):
    """
    Undress all items in the specified dress agent entry.

    :param name: Agent entry name.
    """
    pass
import explanes as el

# Toy example: ask explanes which columns of this row-major table hold a
# constant value across all rows.
# NOTE(review): the exact return format of constantColumn is defined by the
# explanes package -- see its documentation.
table = [['a', 'b', 1, 2], ['a', 'c', 2, 2], ['a', 'b', 2, 2]]
print(el.util.constantColumn(table))
| StarcoderdataPython |
4940651 | <reponame>hajime9652/observations
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def fertil3(path):
  """Load the `fertil3` data set (72 rows x 24 columns), downloading it if absent.

  Yearly U.S. fertility data, 1913-1984 (Wooldridge). Columns include
  `gfr` (births per 1000 women aged 15-44), `pe` (real value of the
  personal exemption, $), `year`, time trends (`t`, `tsq`, `tcu`),
  distributed lags of `pe` (`pe_1`..`pe_4`), dummies `pill`
  (year >= 1963) and `ww2` (1941-1945), and first differences / lags of
  `gfr` and `pe` (`cgfr`, `cpe`, `cpe_1`..`cpe_4`, `gfr_1`, `gfr_2`,
  `cgfr_1`..`cgfr_4`).

  Source:
  https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_
  isbn_issn=9781111531041

  Args:
    path: str. Directory which either already holds `fertil3.csv` or into
      which the file will be downloaded.

  Returns:
    Tuple of np.ndarray `x_train` with 72 rows and 24 columns and a
    dictionary `metadata` with the column headers under 'columns'.
  """
  import pandas as pd

  base_dir = os.path.expanduser(path)
  filename = 'fertil3.csv'
  csv_path = os.path.join(base_dir, filename)
  if not os.path.exists(csv_path):
    url = 'http://dustintran.com/data/r/wooldridge/fertil3.csv'
    maybe_download_and_extract(base_dir, url,
                               save_file_name='fertil3.csv',
                               resume=False)
  frame = pd.read_csv(csv_path, index_col=0, parse_dates=True)
  return frame.values, {'columns': frame.columns}
| StarcoderdataPython |
import random
from itertools import cycle

import numpy as np
import pygame
import sys

# Re-enable default warning display unless the user already passed -W flags.
if not sys.warnoptions:
    import warnings
    warnings.simplefilter("default")

import settings

# Module-level GA state: the current population of bird agents and the
# best bird seen so far (bestbird is not updated anywhere in this chunk).
birds = []
bestbird = None
def main():
    """Initialise settings/pygame state and run game sessions forever.

    Each iteration picks a random background sprite, then runs one full
    game (all GA generations until the window is closed).
    """
    settings.init()
    while True:
        # select random background sprite
        randBg = random.randint(0, len(settings.BACKGROUNDS_LIST) - 1)
        settings.IMAGES['background'] = pygame.image.load(settings.BACKGROUNDS_LIST[randBg]).convert()
        # mainGame() returns nothing useful; the original bound its result
        # to an unused `crashInfo` local, removed here.
        mainGame()
def mainGame():
    """Run one endless game session for the whole GA population.

    Spawns `settings.POPULATION` birds, scrolls pipes leftwards, lets every
    live bird score/think/draw each frame, and once the whole generation has
    crashed replaces `birds` via nextGeneration() and restarts the layout.
    """
    from bird import bird
    global birds
    #Initial Population
    birds = [0] * settings.POPULATION
    for i in range(settings.POPULATION):
        birds[i] = bird()
        birds[i].initialize()
    while True:
        # get 2 new pipes to add to upperPipes lowerPipes list
        newPipe1 = getRandomPipe()
        newPipe2 = getRandomPipe()
        # list of upper pipes
        upperPipes = [
            {'x': settings.SCREENWIDTH + 200, 'y': newPipe1[0]['y']},
            {'x': settings.SCREENWIDTH + 200 + (settings.SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
        ]
        # list of lowerpipe
        lowerPipes = [
            {'x': settings.SCREENWIDTH + 200, 'y': newPipe1[1]['y']},
            {'x': settings.SCREENWIDTH + 200 + (settings.SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
        ]
        pipeVelX = -4  # pipes scroll left 4 px per frame
        num_crashed_birds = 0
        refresh = False  # set True once the entire generation has crashed
        while True and not refresh:
            # move pipes to left
            for uPipe, lPipe in zip(upperPipes, lowerPipes):
                uPipe['x'] += pipeVelX
                lPipe['x'] += pipeVelX
            # add new pipe when first pipe is about to touch left of screen
            if 0 < upperPipes[0]['x'] < 5:
                newPipe = getRandomPipe()
                upperPipes.append(newPipe[0])
                lowerPipes.append(newPipe[1])
            # remove first pipe if its out of the screen
            if upperPipes[0]['x'] < -settings.pipeW:
                upperPipes.pop(0)
                lowerPipes.pop(0)
            # draw sprites
            settings.SCREEN.blit(settings.IMAGES['background'], (0, 0))
            for uPipe, lPipe in zip(upperPipes, lowerPipes):
                # NOTE(review): a random pipe sprite is re-picked every frame
                # for every pipe, so pipe colours will flicker -- confirm
                # this is intended.
                rand_pipe_int = random.randint(0,len(settings.PIPES_LIST)-1)
                settings.SCREEN.blit(settings.IMAGES['pipe'][rand_pipe_int][0], (uPipe['x'], uPipe['y']))
                settings.SCREEN.blit(settings.IMAGES['pipe'][rand_pipe_int][1], (lPipe['x'], lPipe['y']))
            for bird_instance in birds:
                if not bird_instance.crashed and num_crashed_birds<len(birds):
                    # check for crash here
                    crashTest = bird_instance.checkcrash(upperpipes=upperPipes,lowerpipes=lowerPipes)
                    if crashTest[0]:
                        bird_instance.crashed = True
                        num_crashed_birds += 1
                    # A just-crashed bird still gets scored/updated this frame.
                    bird_instance.update_score(upperPipes)
                    bird_instance.think(upperPipes,lowerPipes)
                    settings.SCREEN.blit(settings.IMAGES['base'], (bird_instance.basex, settings.BASEY))
                    # print score so player overlaps the score
                    showScore(bird_instance.score)
                    bird_instance.update_surface()
                    settings.SCREEN.blit(bird_instance.playerSurface, (bird_instance.playerx, bird_instance.playery))
            if num_crashed_birds == len(birds):
                # Whole generation crashed: evolve and restart the level.
                num_crashed_birds = 0
                birds = nextGeneration()
                refresh = True
                break
            pygame.display.update()
            settings.FPSCLOCK.tick(settings.FPS)
def playerShm(playerShm):
    """Oscillate playerShm['val'] between 8 and -8.

    Flips playerShm['dir'] whenever |val| hits 8, then steps val by +1 when
    dir == 1 and by -1 otherwise. Mutates the dict in place.
    """
    shm = playerShm
    if abs(shm['val']) == 8:
        shm['dir'] *= -1
    shm['val'] += 1 if shm['dir'] == 1 else -1
def getRandomPipe():
    """Return an [upper, lower] pipe pair at a random gap height just off-screen."""
    # Top edge of the gap: random within the middle band of the playfield.
    gap_top = random.randrange(0, int(settings.BASEY * 0.6 - settings.PIPEGAPSIZE))
    gap_top += int(settings.BASEY * 0.2)
    pipe_height = settings.IMAGES['pipe'][0][0].get_height()
    pipe_x = settings.SCREENWIDTH + 10  # spawn just beyond the right edge
    upper = {'x': pipe_x, 'y': gap_top - pipe_height}
    lower = {'x': pipe_x, 'y': gap_top + settings.PIPEGAPSIZE}
    return [upper, lower]
def showScore(score):
    """Blit the numeric score, horizontally centered, near the top of the screen."""
    digits = [int(ch) for ch in str(score)]
    total_width = sum(settings.IMAGES['numbers'][d].get_width() for d in digits)
    x_offset = (settings.SCREENWIDTH - total_width) / 2
    for d in digits:
        sprite = settings.IMAGES['numbers'][d]
        settings.SCREEN.blit(sprite, (x_offset, settings.SCREENHEIGHT * 0.1))
        x_offset += sprite.get_width()
#### Genetic algorithm helper functions implementation
def calculatefitness():
    """Assign each bird's fitness from its score.

    Fitness equals the raw score when at least one bird scored, otherwise 0
    for everyone. Returns True iff the population's total score is nonzero.
    """
    global birds
    total = 0
    for b in birds:
        total += b.score
    scored = total != 0
    for b in birds:
        b.fitness = b.score if scored else 0
    return scored
def pickOne():
    """Return the index of the fittest bird in the global population.

    NOTE(review): the original also built a roulette-wheel selection loop
    whose result was discarded, and printed the best fitness for debugging;
    only the argmax index was ever returned, so the dead code and the debug
    print have been removed.

    :return: int index into the global `birds` list of the max-fitness bird.
    """
    global birds
    fitness_vals = np.array([b.fitness for b in birds])
    return int(np.argmax(fitness_vals))
def nextGeneration():
    """Breed the next population of birds from the current global `birds`.

    If at least one bird scored (calculatefitness() returns True), the best
    bird (pickOne()) seeds the new population: slot 0 gets an exact copy of
    its network, every other slot a mutated copy. Otherwise a completely
    fresh random population is created.
    """
    global birds
    from bird import bird
    if(calculatefitness()):
        birds_next = [None]*settings.POPULATION
        # Elitism: keep an unmutated copy of the best network in slot 0.
        birds_next[0] = bird()
        args = {}
        index = pickOne()
        args['nn'] = birds[index].nn.copy()
        birds_next[0].initialize(args)
        for i in range(1,settings.POPULATION):
            # NOTE(review): every child mutates the SAME parent (`index` is
            # chosen once, outside the loop) -- confirm single-parent
            # breeding is intended rather than re-selecting per child.
            birds_next[i] = bird()
            args = {}
            args['nn'] = birds[index].nn.mutate(0.01)
            birds_next[i].initialize(args)
    else:
        # No bird scored: restart with a fully random population.
        birds_next = [None]*settings.POPULATION
        for i in range(settings.POPULATION):
            birds_next[i] = bird()
            birds_next[i].initialize()
    return birds_next
if __name__ == '__main__':
    main()  # entry point: start the game / GA loop when run as a script
import os, sys
sys.path.append("../NetVLAD-pytorch")
import torch
import torch.nn as nn
from torch.autograd import Variable
from netvlad import NetVLAD, NetVLADPure
from netvlad import EmbedNet
from hard_triplet_loss import HardTripletLoss
from torchvision.models import resnet18

# Smoke test: push a random batch of point descriptors through NetVLADPure.
# Input layout: (batch, D0, N), where D0 is the per-point feature dimension
# and N is the number of points per sample.
descriptor_dim = 256
batch_count = 40
points_per_sample = 100

vlad = NetVLADPure(num_clusters=32, dim=descriptor_dim, alpha=1.0)
inputs = torch.rand(batch_count, descriptor_dim, points_per_sample).cuda()
vlad = vlad.cuda()
embeddings = vlad(inputs)
print(embeddings)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.