max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
workflows/async-horovod/Problem.py | mdorier/Supervisor | 10 | 6616551 |
# PROBLEM
# The bounding box for the optimization problem
# This should be a user plug-in
from collections import OrderedDict
class Problem():
def __init__(self):
space = OrderedDict()
#problem specific parameters
# space['drop'] = (0, 0.9)
# space['batch_size'] = [16, 32, 64, 128, 256, 512]
# space['p3'] = [2 , 4, 8, 16, 32, 64, 128]
# space['p4'] = ['a', 'b', 'c']
# space["learning_rate"] = (0,0.009) # Make discrete values
space["learning_rate"] = [ 0.001, 0.002, 0.003, 0.004, 0.005,
0.006, 0.007, 0.008, 0.009 ]
# Use 5 epochs
# Add Horovod PARALLELISM [ 64, 128 , 256, 512 ]
# ? 1.5h ? ?
# Add batch size
self.space = space
self.params = self.space.keys()
self.starting_point = [0.1, 16]
# if __name__ == '__main__':
# instance = Problem()
# print(instance.space)
# print(instance.params)
|
# PROBLEM
# The bounding box for the optimization problem
# This should be a user plug-in
from collections import OrderedDict
class Problem():
def __init__(self):
space = OrderedDict()
#problem specific parameters
# space['drop'] = (0, 0.9)
# space['batch_size'] = [16, 32, 64, 128, 256, 512]
# space['p3'] = [2 , 4, 8, 16, 32, 64, 128]
# space['p4'] = ['a', 'b', 'c']
# space["learning_rate"] = (0,0.009) # Make discrete values
space["learning_rate"] = [ 0.001, 0.002, 0.003, 0.004, 0.005,
0.006, 0.007, 0.008, 0.009 ]
# Use 5 epochs
# Add Horovod PARALLELISM [ 64, 128 , 256, 512 ]
# ? 1.5h ? ?
# Add batch size
self.space = space
self.params = self.space.keys()
self.starting_point = [0.1, 16]
# if __name__ == '__main__':
# instance = Problem()
# print(instance.space)
# print(instance.params)
| en | 0.493397 | # PROBLEM # The bounding box for the optimization problem # This should be a user plug-in #problem specific parameters # space['drop'] = (0, 0.9) # space['batch_size'] = [16, 32, 64, 128, 256, 512] # space['p3'] = [2 , 4, 8, 16, 32, 64, 128] # space['p4'] = ['a', 'b', 'c'] # space["learning_rate"] = (0,0.009) # Make discrete values # Use 5 epochs # Add Horovod PARALLELISM [ 64, 128 , 256, 512 ] # ? 1.5h ? ? # Add batch size # if __name__ == '__main__': # instance = Problem() # print(instance.space) # print(instance.params) | 3.014193 | 3 |
Data_Helper/__init__.py | ShuzhiLiu/MaskRCNN | 9 | 6616552 | <reponame>ShuzhiLiu/MaskRCNN<gh_stars>1-10
from Data_Helper.cocotools import CocoTools
| from Data_Helper.cocotools import CocoTools | none | 1 | 1.060545 | 1 | |
oocfg/cfg.py | Prolht/OOCfg | 0 | 6616553 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Time : 2020/12/12 21:04
@Author : lex(<EMAIL>)
The module to parse the config file
"""
from oocfg.config import options as opt
from oocfg.config import exceptions
from oocfg.config import utils
class Config(object):
GROUP_REGISTERED = False
def __init__(self):
self.config_file = None
self._setup_cfg()
def _setup_cfg(self):
self.CONF = opt.ConfigOpts()
def _load_config_file(self):
conf_type = utils.get_config_file_type(self.config_file)
if conf_type == 'ini':
self.config_map = utils.load_ini_cofing(self.config_file)
elif conf_type == 'yaml':
self.config_map = utils.load_yaml_config(self.config_file)
elif conf_type == 'conf':
self.config_map = utils.load_conf_config(self.config_file)
def set_default_config(self, sections):
"""
set default config value.
After the default config value, we can override the config with config file.
This is import because all the config load from config file
will work after this method
:param sections:
:return:
"""
for group, opts in sections.items():
self.validate_opts(opts)
self.CONF.register_opts(group.upper(), opts)
self.GROUP_REGISTERED = True
def validate_sections(self, sections):
if sections == '' or sections == {}:
raise exceptions.EmptySections()
if isinstance(sections, list):
raise exceptions.SectionsFormatError()
def validate_opts(self, opts):
if not isinstance(opts, list):
raise exceptions.OptsFormatError()
def startup(self, sections, config_file=None, auto_find=False):
"""
main method of load config file
:param config_file: the absolute path of config_file, like, /etc/project/config.ini
:param sections: the default config group to register
:param auto_find: if config_file is None, whether to find config file
:return:
"""
self.validate_sections(sections)
self.set_default_config(sections)
# this method should be called after register_all_group
if config_file is None and not auto_find:
# the default config value is enough
return
self.config_file = config_file
self._load_config_file()
if not self.GROUP_REGISTERED:
raise exceptions.GroupNoRegistered()
self.CONF.set_config_file_value(self.config_map)
cfg = Config()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Time : 2020/12/12 21:04
@Author : lex(<EMAIL>)
The module to parse the config file
"""
from oocfg.config import options as opt
from oocfg.config import exceptions
from oocfg.config import utils
class Config(object):
GROUP_REGISTERED = False
def __init__(self):
self.config_file = None
self._setup_cfg()
def _setup_cfg(self):
self.CONF = opt.ConfigOpts()
def _load_config_file(self):
conf_type = utils.get_config_file_type(self.config_file)
if conf_type == 'ini':
self.config_map = utils.load_ini_cofing(self.config_file)
elif conf_type == 'yaml':
self.config_map = utils.load_yaml_config(self.config_file)
elif conf_type == 'conf':
self.config_map = utils.load_conf_config(self.config_file)
def set_default_config(self, sections):
"""
set default config value.
After the default config value, we can override the config with config file.
This is import because all the config load from config file
will work after this method
:param sections:
:return:
"""
for group, opts in sections.items():
self.validate_opts(opts)
self.CONF.register_opts(group.upper(), opts)
self.GROUP_REGISTERED = True
def validate_sections(self, sections):
if sections == '' or sections == {}:
raise exceptions.EmptySections()
if isinstance(sections, list):
raise exceptions.SectionsFormatError()
def validate_opts(self, opts):
if not isinstance(opts, list):
raise exceptions.OptsFormatError()
def startup(self, sections, config_file=None, auto_find=False):
"""
main method of load config file
:param config_file: the absolute path of config_file, like, /etc/project/config.ini
:param sections: the default config group to register
:param auto_find: if config_file is None, whether to find config file
:return:
"""
self.validate_sections(sections)
self.set_default_config(sections)
# this method should be called after register_all_group
if config_file is None and not auto_find:
# the default config value is enough
return
self.config_file = config_file
self._load_config_file()
if not self.GROUP_REGISTERED:
raise exceptions.GroupNoRegistered()
self.CONF.set_config_file_value(self.config_map)
cfg = Config()
| en | 0.673032 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- @Time : 2020/12/12 21:04 @Author : lex(<EMAIL>) The module to parse the config file set default config value. After the default config value, we can override the config with config file. This is import because all the config load from config file will work after this method :param sections: :return: main method of load config file :param config_file: the absolute path of config_file, like, /etc/project/config.ini :param sections: the default config group to register :param auto_find: if config_file is None, whether to find config file :return: # this method should be called after register_all_group # the default config value is enough | 2.960632 | 3 |
irisreader/utils/lazy_file_header_list.py | chuwyler/IRISreader | 0 | 6616554 | #!/usr/bin/env python3
import numpy as np
class lazy_file_header_list:
"""
This class abstracts a list of headers stored in many different files.
It holds a list for every file of iris_data_cube and loads headers lazy
upon request. It abstracts access such that it can be used almost like a
regular python list (assigning to elements is currently not implemented).
Parameters
----------
valid_steps : numpy.ndarray
valid_steps array from iris_data_cube
file_load_fn : function
function to load a specific file
"""
# constructor
def __init__( self, valid_steps, file_load_fn ):
# the valid steps numpy array from iris_data_cube, passed by reference!
self._valid_steps = valid_steps
# function to load a specific file
self._load_file = file_load_fn
# create a list with a sublist for every file: this is the representation
# under the hood
n_files = int( np.max( valid_steps[:,0] ) + 1 )
self._data = [[]] * n_files
# abstract access to the headers in order to appear as list on the outside
# this function can return both single items but also slices of data
def __getitem__( self, index ):
# get file number and file step (numpy array)
steps = self._valid_steps[ index, : ]
# if steps contains only one line: make sure it's two-dimensional
if steps.ndim == 1:
steps = steps.reshape(1,-1)
# load all the required files and create a list with the requested data
res = []
for file_no in np.unique( steps[:,0] ):
if len( self._data[ file_no ] ) == 0: # load data if not there yet
self._data[ file_no ] = self._load_file( file_no )
# get file steps that are to be used
file_steps = steps[steps[:,0]==file_no,:][:,1]
# append the headers to the output
res.extend( [ self._data[ file_no ][i] for i in file_steps ] )
# make sure that single outputs are not encapsulated in a list
if len(res) == 1:
return res[0]
else:
return res
# returns the number of headers that are (lazily) available
def __len__( self ):
return self._valid_steps.shape[0]
# too complicated to deal with slices: raise an error if user wants to set a whole slice
def __setitem__( self, index, value ):
if isinstance( index, (list, tuple, slice) ):
raise NotImplementedError( "The assignment of arrays to lazy header lists is currently not possible. Please use .tolist() to get a list representation" )
self[index] # make sure the file belonging to index is loaded
file_no, file_step = self._valid_steps[ index, : ]
self._data[ file_no ][ file_step ] = value
# delete an item (e.g. when removing bad images after cropping)
# this is done automatically, since _valid_steps is passed by reference!
# def __delitem__( self, index )
# convert representation into a regular python list (will load everything)
def tolist( self ):
return self[:]
# upon print call return a list representation (will load everything)
def __repr__( self ):
return str( self.tolist() )
# Test code
if __name__ == "__main__":
import irisreader as ir
from irisreader import iris_data_cube
ir.config.verbosity_level = 4
raster_data = iris_data_cube(
[ "/home/chuwyler/Desktop/IRISreader/irisreader/data/IRIS_raster_test1.fits",
"/home/chuwyler/Desktop/IRISreader/irisreader/data/IRIS_raster_test2.fits" ],
line="Mg"
)
lh = lazy_file_header_list( raster_data._valid_steps, raster_data._load_time_specific_header_file )
for header in lh:
print( header['DATE_OBS'] )
| #!/usr/bin/env python3
import numpy as np
class lazy_file_header_list:
"""
This class abstracts a list of headers stored in many different files.
It holds a list for every file of iris_data_cube and loads headers lazy
upon request. It abstracts access such that it can be used almost like a
regular python list (assigning to elements is currently not implemented).
Parameters
----------
valid_steps : numpy.ndarray
valid_steps array from iris_data_cube
file_load_fn : function
function to load a specific file
"""
# constructor
def __init__( self, valid_steps, file_load_fn ):
# the valid steps numpy array from iris_data_cube, passed by reference!
self._valid_steps = valid_steps
# function to load a specific file
self._load_file = file_load_fn
# create a list with a sublist for every file: this is the representation
# under the hood
n_files = int( np.max( valid_steps[:,0] ) + 1 )
self._data = [[]] * n_files
# abstract access to the headers in order to appear as list on the outside
# this function can return both single items but also slices of data
def __getitem__( self, index ):
# get file number and file step (numpy array)
steps = self._valid_steps[ index, : ]
# if steps contains only one line: make sure it's two-dimensional
if steps.ndim == 1:
steps = steps.reshape(1,-1)
# load all the required files and create a list with the requested data
res = []
for file_no in np.unique( steps[:,0] ):
if len( self._data[ file_no ] ) == 0: # load data if not there yet
self._data[ file_no ] = self._load_file( file_no )
# get file steps that are to be used
file_steps = steps[steps[:,0]==file_no,:][:,1]
# append the headers to the output
res.extend( [ self._data[ file_no ][i] for i in file_steps ] )
# make sure that single outputs are not encapsulated in a list
if len(res) == 1:
return res[0]
else:
return res
# returns the number of headers that are (lazily) available
def __len__( self ):
return self._valid_steps.shape[0]
# too complicated to deal with slices: raise an error if user wants to set a whole slice
def __setitem__( self, index, value ):
if isinstance( index, (list, tuple, slice) ):
raise NotImplementedError( "The assignment of arrays to lazy header lists is currently not possible. Please use .tolist() to get a list representation" )
self[index] # make sure the file belonging to index is loaded
file_no, file_step = self._valid_steps[ index, : ]
self._data[ file_no ][ file_step ] = value
# delete an item (e.g. when removing bad images after cropping)
# this is done automatically, since _valid_steps is passed by reference!
# def __delitem__( self, index )
# convert representation into a regular python list (will load everything)
def tolist( self ):
return self[:]
# upon print call return a list representation (will load everything)
def __repr__( self ):
return str( self.tolist() )
# Test code
if __name__ == "__main__":
import irisreader as ir
from irisreader import iris_data_cube
ir.config.verbosity_level = 4
raster_data = iris_data_cube(
[ "/home/chuwyler/Desktop/IRISreader/irisreader/data/IRIS_raster_test1.fits",
"/home/chuwyler/Desktop/IRISreader/irisreader/data/IRIS_raster_test2.fits" ],
line="Mg"
)
lh = lazy_file_header_list( raster_data._valid_steps, raster_data._load_time_specific_header_file )
for header in lh:
print( header['DATE_OBS'] )
| en | 0.831075 | #!/usr/bin/env python3 This class abstracts a list of headers stored in many different files. It holds a list for every file of iris_data_cube and loads headers lazy upon request. It abstracts access such that it can be used almost like a regular python list (assigning to elements is currently not implemented). Parameters ---------- valid_steps : numpy.ndarray valid_steps array from iris_data_cube file_load_fn : function function to load a specific file # constructor # the valid steps numpy array from iris_data_cube, passed by reference! # function to load a specific file # create a list with a sublist for every file: this is the representation # under the hood # abstract access to the headers in order to appear as list on the outside # this function can return both single items but also slices of data # get file number and file step (numpy array) # if steps contains only one line: make sure it's two-dimensional # load all the required files and create a list with the requested data # load data if not there yet # get file steps that are to be used # append the headers to the output # make sure that single outputs are not encapsulated in a list # returns the number of headers that are (lazily) available # too complicated to deal with slices: raise an error if user wants to set a whole slice # make sure the file belonging to index is loaded # delete an item (e.g. when removing bad images after cropping) # this is done automatically, since _valid_steps is passed by reference! # def __delitem__( self, index ) # convert representation into a regular python list (will load everything) # upon print call return a list representation (will load everything) # Test code | 3.064839 | 3 |
src/fr/tagc/rainet/core/data/RNATissueExpression.py | TAGC-Brun/RAINET-RNA | 0 | 6616555 | <reponame>TAGC-Brun/RAINET-RNA
from sqlalchemy import Column, String, Float, ForeignKey, ForeignKeyConstraint, PrimaryKeyConstraint
from sqlalchemy.orm import relationship
from fr.tagc.rainet.core.util.sql.Base import Base
from fr.tagc.rainet.core.util.sql.SQLManager import SQLManager
from fr.tagc.rainet.core.util.data.DataManager import DataManager
from fr.tagc.rainet.core.data import DataConstants
from fr.tagc.rainet.core.util.exception.NotRequiredInstantiationException import NotRequiredInstantiationException
from fr.tagc.rainet.core.util.exception.RainetException import RainetException
from fr.tagc.rainet.core.util.log.Logger import Logger
# #
# This class describes a expression of transcript in a tissue
#
class RNATissueExpression( Base ):
__tablename__ = 'RNATissueExpression'
# The expressed RNA
transcriptID = Column( String, ForeignKey( 'RNA.transcriptID'), primary_key=True)
# The tissue name
tissueName = Column( String, ForeignKey( 'Tissue.tissueName'), primary_key=True)
# The expression value
expressionValue = Column( Float )
#
# The constructor of the class
#
# @param tissue_name : string - the tissue name
# @param transcript_id : string - the RNA transcript ID
# @param expression_value : float - the actual expression value
# @param source_db : string - database/dataset where the information comes from. This is given in the DataConstants.
def __init__(self, transcript_id, tissue_name, expression_value, source_db):
#=======================================================================
# Approach: read single file which contains the expression values and the tissues.
# Create the tissue objects while reading the file and add the correspondence between
# tissue and RNA in the Tissue
#
# Avoiding SQL queries for performance purposes
#=======================================================================
dt_manager = DataManager.get_instance()
sql_session = SQLManager.get_instance().get_session()
#=======================================================================
# Search for the RNA object
#=======================================================================
# use previously created dictionary containing all RNA objects identified by transcriptID
if transcript_id in dt_manager.get_data( DataConstants.RNA_ALL_KW):
# Add RNA transcriptID correspondence to this instance
self.transcriptID = transcript_id #here
else:
# Some transcript IDs from input expression file (e.g. GTEx) are deprecated in the Ensembl version used for the RNA models
Logger.get_instance().warning( "RNATissueExpression.init : RNA not found for transcriptID = " + transcript_id )
raise NotRequiredInstantiationException( "RNATissueExpression.init : RNATissueExpression objects not inserted since corresponding RNA is not found.")
#=======================================================================
# Build the Tissue objects related to the RNA expression
# use temporary DataManager object to see if tissue is already present, if not, create new entry
#=======================================================================
from fr.tagc.rainet.core.data.Tissue import Tissue
# Create data structure external to this instance to accumulate already processed tissue names
kw = "tempSet"
if kw not in dt_manager.data.keys():
# initialise data manager as a set
dt_manager.store_data( kw, set())
if tissue_name not in dt_manager.data[ kw]:
tissueObj = Tissue( tissue_name, source_db)
sql_session.add( tissueObj)
# add tissue names to data manager
dt_manager.data[ kw].add( tissue_name)
# Add tissue name correspondance to this instance
self.tissueName = tissue_name
#=======================================================================
# Add supplementary info about the expression
#=======================================================================
try:
expression = float( expression_value)
except ValueError as ve:
raise RainetException( "RNATissueExpression.__init__ : The expression value is not a float: " + str( expression_value ), ve )
self.expressionValue = expression
##
# Add the object to SQLAlchemy session if it is linked to a protein
def add_to_session(self):
sql_session = SQLManager.get_instance().get_session()
sql_session.add( self)
| from sqlalchemy import Column, String, Float, ForeignKey, ForeignKeyConstraint, PrimaryKeyConstraint
from sqlalchemy.orm import relationship
from fr.tagc.rainet.core.util.sql.Base import Base
from fr.tagc.rainet.core.util.sql.SQLManager import SQLManager
from fr.tagc.rainet.core.util.data.DataManager import DataManager
from fr.tagc.rainet.core.data import DataConstants
from fr.tagc.rainet.core.util.exception.NotRequiredInstantiationException import NotRequiredInstantiationException
from fr.tagc.rainet.core.util.exception.RainetException import RainetException
from fr.tagc.rainet.core.util.log.Logger import Logger
# #
# This class describes a expression of transcript in a tissue
#
class RNATissueExpression( Base ):
__tablename__ = 'RNATissueExpression'
# The expressed RNA
transcriptID = Column( String, ForeignKey( 'RNA.transcriptID'), primary_key=True)
# The tissue name
tissueName = Column( String, ForeignKey( 'Tissue.tissueName'), primary_key=True)
# The expression value
expressionValue = Column( Float )
#
# The constructor of the class
#
# @param tissue_name : string - the tissue name
# @param transcript_id : string - the RNA transcript ID
# @param expression_value : float - the actual expression value
# @param source_db : string - database/dataset where the information comes from. This is given in the DataConstants.
def __init__(self, transcript_id, tissue_name, expression_value, source_db):
#=======================================================================
# Approach: read single file which contains the expression values and the tissues.
# Create the tissue objects while reading the file and add the correspondence between
# tissue and RNA in the Tissue
#
# Avoiding SQL queries for performance purposes
#=======================================================================
dt_manager = DataManager.get_instance()
sql_session = SQLManager.get_instance().get_session()
#=======================================================================
# Search for the RNA object
#=======================================================================
# use previously created dictionary containing all RNA objects identified by transcriptID
if transcript_id in dt_manager.get_data( DataConstants.RNA_ALL_KW):
# Add RNA transcriptID correspondence to this instance
self.transcriptID = transcript_id #here
else:
# Some transcript IDs from input expression file (e.g. GTEx) are deprecated in the Ensembl version used for the RNA models
Logger.get_instance().warning( "RNATissueExpression.init : RNA not found for transcriptID = " + transcript_id )
raise NotRequiredInstantiationException( "RNATissueExpression.init : RNATissueExpression objects not inserted since corresponding RNA is not found.")
#=======================================================================
# Build the Tissue objects related to the RNA expression
# use temporary DataManager object to see if tissue is already present, if not, create new entry
#=======================================================================
from fr.tagc.rainet.core.data.Tissue import Tissue
# Create data structure external to this instance to accumulate already processed tissue names
kw = "tempSet"
if kw not in dt_manager.data.keys():
# initialise data manager as a set
dt_manager.store_data( kw, set())
if tissue_name not in dt_manager.data[ kw]:
tissueObj = Tissue( tissue_name, source_db)
sql_session.add( tissueObj)
# add tissue names to data manager
dt_manager.data[ kw].add( tissue_name)
# Add tissue name correspondance to this instance
self.tissueName = tissue_name
#=======================================================================
# Add supplementary info about the expression
#=======================================================================
try:
expression = float( expression_value)
except ValueError as ve:
raise RainetException( "RNATissueExpression.__init__ : The expression value is not a float: " + str( expression_value ), ve )
self.expressionValue = expression
##
# Add the object to SQLAlchemy session if it is linked to a protein
def add_to_session(self):
sql_session = SQLManager.get_instance().get_session()
sql_session.add( self) | en | 0.669666 | # # # This class describes a expression of transcript in a tissue # # The expressed RNA # The tissue name # The expression value # # The constructor of the class # # @param tissue_name : string - the tissue name # @param transcript_id : string - the RNA transcript ID # @param expression_value : float - the actual expression value # @param source_db : string - database/dataset where the information comes from. This is given in the DataConstants. #======================================================================= # Approach: read single file which contains the expression values and the tissues. # Create the tissue objects while reading the file and add the correspondence between # tissue and RNA in the Tissue # # Avoiding SQL queries for performance purposes #======================================================================= #======================================================================= # Search for the RNA object #======================================================================= # use previously created dictionary containing all RNA objects identified by transcriptID # Add RNA transcriptID correspondence to this instance #here # Some transcript IDs from input expression file (e.g. 
GTEx) are deprecated in the Ensembl version used for the RNA models #======================================================================= # Build the Tissue objects related to the RNA expression # use temporary DataManager object to see if tissue is already present, if not, create new entry #======================================================================= # Create data structure external to this instance to accumulate already processed tissue names # initialise data manager as a set # add tissue names to data manager # Add tissue name correspondance to this instance #======================================================================= # Add supplementary info about the expression #======================================================================= ## # Add the object to SQLAlchemy session if it is linked to a protein | 2.633061 | 3 |
Lab1/ex2/parallel/OpenMP/plots/s_plot.py | PanosAntoniadis/pps-ntua | 5 | 6616556 | import matplotlib.pyplot as plt
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
time = []
cores = ['1', '2', '4', '8', '16', '32', '64']
fp = open(sys.argv[1])
line = fp.readline()
while line:
tokens = line.split(',')
time.append(float(tokens[2]))
line = fp.readline()
fp.close()
fig, ax = plt.subplots()
ax.grid(True)
ax.set_xlabel("number of cores")
ax.xaxis.set_ticks(np.arange(0, len(cores), 1))
ax.set_xticklabels(cores, rotation=45)
ax.set_xlim(-0.5, len(cores) - 0.5)
ax.set_ylabel("time")
ax.plot(time, label="Time", color="blue", marker='x')
plt.title("Parallel for in FW algorithm in 4096×4096")
lgd = plt.legend(['standard FW'])
lgd.draw_frame(False)
plt.savefig("s_4096.png", bbox_inches="tight")
| import matplotlib.pyplot as plt
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
time = []
cores = ['1', '2', '4', '8', '16', '32', '64']
fp = open(sys.argv[1])
line = fp.readline()
while line:
tokens = line.split(',')
time.append(float(tokens[2]))
line = fp.readline()
fp.close()
fig, ax = plt.subplots()
ax.grid(True)
ax.set_xlabel("number of cores")
ax.xaxis.set_ticks(np.arange(0, len(cores), 1))
ax.set_xticklabels(cores, rotation=45)
ax.set_xlim(-0.5, len(cores) - 0.5)
ax.set_ylabel("time")
ax.plot(time, label="Time", color="blue", marker='x')
plt.title("Parallel for in FW algorithm in 4096×4096")
lgd = plt.legend(['standard FW'])
lgd.draw_frame(False)
plt.savefig("s_4096.png", bbox_inches="tight")
| none | 1 | 2.783962 | 3 | |
bin/public/extract_uuids_from_email_list.py | trevor-wu/e-mission-server | 21 | 6616557 | <gh_stars>10-100
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# Converts user emails -> UUIDs
# The UUIDs can be used to extract data for moving across servers
# Typically used as the file input to the
# extract_timeline_for_day_range_and_user.py script
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import *
import sys
import logging
import gzip
import json
import argparse
import bson.json_util as bju
import emission.core.wrapper.user as ecwu
# Input data using one email per line (easy to copy/paste)
# Output data using json (easy to serialize and re-read)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(prog="extract_uuids_from_email_list")
parser.add_argument("user_email_file")
parser.add_argument("-o", "--outfile", help="the output filename (default: stdout)")
args = parser.parse_args()
user_email_filename = args.user_email_file
out_fd = sys.stdout if args.outfile is None else open(args.outfile, "w")
emails = open(user_email_filename).readlines()
uuids = []
for e in emails:
user = ecwu.User.fromEmail(e.strip())
if user is None:
logging.warning("Found no mapping for email %s" % e)
else:
uuid = user.uuid
logging.debug("Mapped email %s to uuid %s" % (e.strip(), uuid))
uuids.append(uuid)
uuid_strs = [{"uuid": u} for u in uuids]
json.dump(uuid_strs, out_fd, default=bju.default)
| from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# Converts user emails -> UUIDs
# The UUIDs can be used to extract data for moving across servers
# Typically used as the file input to the
# extract_timeline_for_day_range_and_user.py script
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import *
import sys
import logging
import gzip
import json
import argparse
import bson.json_util as bju
import emission.core.wrapper.user as ecwu
# Input data using one email per line (easy to copy/paste)
# Output data using json (easy to serialize and re-read)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(prog="extract_uuids_from_email_list")
parser.add_argument("user_email_file")
parser.add_argument("-o", "--outfile", help="the output filename (default: stdout)")
args = parser.parse_args()
user_email_filename = args.user_email_file
out_fd = sys.stdout if args.outfile is None else open(args.outfile, "w")
emails = open(user_email_filename).readlines()
uuids = []
for e in emails:
user = ecwu.User.fromEmail(e.strip())
if user is None:
logging.warning("Found no mapping for email %s" % e)
else:
uuid = user.uuid
logging.debug("Mapped email %s to uuid %s" % (e.strip(), uuid))
uuids.append(uuid)
uuid_strs = [{"uuid": u} for u in uuids]
json.dump(uuid_strs, out_fd, default=bju.default) | en | 0.792418 | # Converts user emails -> UUIDs # The UUIDs can be used to extract data for moving across servers # Typically used as the file input to the # extract_timeline_for_day_range_and_user.py script # Input data using one email per line (easy to copy/paste) # Output data using json (easy to serialize and re-read) | 2.766456 | 3 |
app/providers/person.py | randomicu/fakedata-provider | 1 | 6616558 | #!/usr/bin/env python
# This module is wrapper for Mimesis's gender provider
import functools
from typing import Dict
from typing import Optional
from typing import Union
from mimesis import Person
from mimesis.builtins import RussiaSpecProvider
from mimesis.enums import Gender
@functools.lru_cache()
def get_person_object(lang: str) -> Person:
    """Return a memoised mimesis ``Person`` provider for *lang*."""
    provider = Person(lang)
    return provider
@functools.lru_cache()
def get_ru_special_fields() -> RussiaSpecProvider:
    """Return a memoised provider for the Russia-specific builtin fields."""
    provider = RussiaSpecProvider()
    return provider
def get_data(person: Person, gender: Gender) -> Dict[str, object]:
    """Build a dictionary of fake personal data.

    *person* is a (cached) mimesis ``Person`` provider; *gender* selects the
    gender-dependent fields (names, title, nationality).
    """
    first_name = person.first_name(gender=gender)
    last_name = person.last_name(gender=gender)
    return {
        'age': person.age(),
        'email': person.email(),
        'first_name': first_name,
        'full_name': f'{first_name} {last_name}',
        'gender': str(gender.value).capitalize(),
        'height': person.height(),
        'identifier': person.identifier(),
        'last_name': last_name,
        'nationality': person.nationality(gender=gender),
        'occupation': person.occupation(),
        # Restored: the original line was redacted to "<PASSWORD>()",
        # which is not valid Python; mimesis Person exposes password().
        'password': person.password(),
        'political_views': person.political_views(),
        'telephone': person.telephone(),
        'title': person.title(gender=gender),
        'university': person.university(),
        'username': person.username(),
        'weight': person.weight(),
        'work_experience': person.work_experience(),
    }
def get_additional_data(lang: str, gender: Gender) -> Optional[Dict[str, str]]:
    """Return Russia-specific extra fields for ``lang == 'ru'``, else None."""
    if lang != 'ru':
        return None
    ru = get_ru_special_fields()
    return {
        'patronymic': ru.patronymic(gender=gender),
        'inn': ru.inn(),
        'kpp': ru.kpp(),
        'bic': ru.bic(),
        'ogrn': ru.ogrn(),
        'passport': ru.series_and_number()
    }
def get_person_gender(gender_code: Union[str, int]) -> Gender:
    """Map an ISO/IEC 5218 sex code to a mimesis ``Gender``.

    :param gender_code:
        Codes for the representation of human sexes is an international
        standard (0 - not known, 1 - male, 2 - female, 9 - not applicable).
        Codes 0 and 1 map to MALE; everything else (including non-int
        inputs) maps to FEMALE, matching the original branch chain.
    """
    return Gender.MALE if gender_code in (0, 1) else Gender.FEMALE
| #!/usr/bin/env python
# This module is wrapper for Mimesis's gender provider
import functools
from typing import Dict
from typing import Optional
from typing import Union
from mimesis import Person
from mimesis.builtins import RussiaSpecProvider
from mimesis.enums import Gender
@functools.lru_cache()
def get_person_object(lang: str) -> Person:
return Person(lang)
@functools.lru_cache()
def get_ru_special_fields() -> RussiaSpecProvider:
return RussiaSpecProvider()
def get_data(person: Person, gender: Gender):
first_name = person.first_name(gender=gender)
last_name = person.last_name(gender=gender)
return {
'age': person.age(),
'email': person.email(),
'first_name': first_name,
'full_name': f'{first_name} {last_name}',
'gender': str(gender.value).capitalize(),
'height': person.height(),
'identifier': person.identifier(),
'last_name': last_name,
'nationality': person.nationality(gender=gender),
'occupation': person.occupation(),
'password': <PASSWORD>(),
'political_views': person.political_views(),
'telephone': person.telephone(),
'title': person.title(gender=gender),
'university': person.university(),
'username': person.username(),
'weight': person.weight(),
'work_experience': person.work_experience(),
}
def get_additional_data(lang: str, gender: Gender) -> Optional[Dict[str, str]]:
if lang == 'ru':
ru = get_ru_special_fields()
additional_data = {
'patronymic': ru.patronymic(gender=gender),
'inn': ru.inn(),
'kpp': ru.kpp(),
'bic': ru.bic(),
'ogrn': ru.ogrn(),
'passport': ru.series_and_number()
}
return additional_data
else:
return None
def get_person_gender(gender_code: Union[str, int]) -> Gender:
"""
:param gender_code:
Codes for the representation of human sexes is an international
standard (0 - not known, 1 - male, 2 - female, 9 - not applicable).
"""
if gender_code == 0:
return Gender.MALE
elif gender_code == 1:
return Gender.MALE
elif gender_code == 2:
return Gender.FEMALE
else:
return Gender.FEMALE
| en | 0.712865 | #!/usr/bin/env python # This module is wrapper for Mimesis's gender provider :param gender_code: Codes for the representation of human sexes is an international standard (0 - not known, 1 - male, 2 - female, 9 - not applicable). | 2.232042 | 2 |
model.py | jm12138/MLP-Mixer-Paddle | 1 | 6616559 | <filename>model.py
import paddle
import paddle.nn as nn
class MlpBlock(nn.Layer):
    """Feed-forward block: Linear -> GELU -> Linear, back to the input width."""

    def __init__(self, features_dim, mlp_dim):
        super().__init__()
        # Sub-layer names (fc_0/fc_1) must match the released checkpoints.
        self.fc_0 = nn.Linear(features_dim, mlp_dim)
        self.fc_1 = nn.Linear(mlp_dim, features_dim)

    def forward(self, x):
        hidden = nn.functional.gelu(self.fc_0(x))
        return self.fc_1(hidden)
class MixerBlock(nn.Layer):
    """One Mixer layer: a token-mixing MLP then a channel-mixing MLP.

    Both sub-blocks are pre-norm residual branches (MLP-Mixer style).
    """
    def __init__(self, token_dim, channels_dim,
                 tokens_mlp_dim, channels_mlp_dim,
                 norm_layer=nn.LayerNorm, epsilon=1e-6):
        super().__init__()
        # LayerNorm is applied over the channel (last) axis.
        self.norm_0 = norm_layer(channels_dim, epsilon=epsilon)
        self.token_mixing = MlpBlock(token_dim, tokens_mlp_dim)
        self.norm_1 = norm_layer(channels_dim, epsilon=epsilon)
        self.channel_mixing = MlpBlock(channels_dim, channels_mlp_dim)
    def forward(self, x):
        # x is (batch, tokens, channels) -- implied by the transposes below.
        y = self.norm_0(x)
        # Swap tokens/channels so the MLP mixes information across tokens.
        y = y.transpose((0, 2, 1))
        y = self.token_mixing(y)
        y = y.transpose((0, 2, 1))
        x = x + y  # residual around token mixing
        y = self.norm_1(x)
        y = self.channel_mixing(y)
        x = x + y  # residual around channel mixing
        return x
class MlpMixer(nn.Layer):
    """MLP-Mixer backbone: patch-embedding stem, MixerBlocks, optional head.

    When ``class_dim`` <= 0 the classification head is omitted and the
    per-token features after the final LayerNorm are returned instead.
    """
    def __init__(self, img_size=(224, 224), patch_size=(16, 16),
                 num_blocks=12, hidden_dim=768,
                 tokens_mlp_dim=384, channels_mlp_dim=3072,
                 norm_layer=nn.LayerNorm, epsilon=1e-6,
                 class_dim=1000):
        super().__init__()
        self.class_dim = class_dim
        # Non-overlapping patch embedding: one conv with stride == kernel size.
        self.stem = nn.Conv2D(
            3, hidden_dim, kernel_size=patch_size, stride=patch_size)
        blocks = [
            MixerBlock(
                # Token count; only dim 0 is used and squared, so this
                # assumes a square image/patch grid -- TODO confirm.
                (img_size[0] // patch_size[0]) ** 2,
                hidden_dim,
                tokens_mlp_dim,
                channels_mlp_dim,
                norm_layer,
                epsilon
            ) for _ in range(num_blocks)
        ]
        self.blocks = nn.Sequential(*blocks)
        self.pre_head_layer_norm = norm_layer(hidden_dim, epsilon=epsilon)
        if class_dim > 0:
            self.head = nn.Linear(hidden_dim, class_dim)
    def forward(self, inputs):
        x = self.stem(inputs)          # (B, hidden, H/p, W/p)
        x = x.transpose((0, 2, 3, 1))  # channels-last
        x = x.flatten(1, 2)            # (B, tokens, hidden)
        x = self.blocks(x)
        x = self.pre_head_layer_norm(x)
        if self.class_dim > 0:
            x = x.mean(axis=1)         # global average over tokens
            x = self.head(x)
        return x
def mixer_b(pretrained=False, **kwargs):
    '''
    Model: MLP-mixer-base (B/16 configuration)
    Params:
        pretrained: when True, download and load the ImageNet-1k checkpoint
        **kwargs: forwarded to MlpMixer (e.g. img_size, patch_size, class_dim)
    '''
    model = MlpMixer(hidden_dim=768, num_blocks=12,
                     tokens_mlp_dim=384, channels_mlp_dim=3072, **kwargs)
    if not pretrained:
        return model
    # Fetch (and cache) the released checkpoint, then load it in place.
    weights_path = paddle.utils.download.get_weights_path_from_url(
        'https://bj.bcebos.com/v1/ai-studio-online/8fcd0b6ba98042d68763bbcbfe96375cbfd97ffed8334ac09787ef73ecf9989f?responseContentDisposition=attachment%3B%20filename%3Dimagenet1k_Mixer-B_16.pdparams')
    model.set_dict(paddle.load(weights_path))
    return model
def mixer_l(pretrained=False, **kwargs):
    '''
    Model: MLP-mixer-large (L/16 configuration)
    Params:
        pretrained: when True, download and load the ImageNet-1k checkpoint
        **kwargs: forwarded to MlpMixer (e.g. img_size, patch_size, class_dim)
    '''
    model = MlpMixer(hidden_dim=1024, num_blocks=24,
                     tokens_mlp_dim=512, channels_mlp_dim=4096, **kwargs)
    if not pretrained:
        return model
    # Fetch (and cache) the released checkpoint, then load it in place.
    weights_path = paddle.utils.download.get_weights_path_from_url(
        'https://bj.bcebos.com/v1/ai-studio-online/ca74ababd4834e34b089c1485989738de4fdf6a97be645ed81b6e39449c5815c?responseContentDisposition=attachment%3B%20filename%3Dimagenet1k_Mixer-L_16.pdparams')
    model.set_dict(paddle.load(weights_path))
    return model
| <filename>model.py
import paddle
import paddle.nn as nn
class MlpBlock(nn.Layer):
def __init__(self, features_dim, mlp_dim):
super().__init__()
self.fc_0 = nn.Linear(features_dim, mlp_dim)
self.fc_1 = nn.Linear(mlp_dim, features_dim)
def forward(self, x):
y = self.fc_0(x)
y = nn.functional.gelu(y)
y = self.fc_1(y)
return y
class MixerBlock(nn.Layer):
def __init__(self, token_dim, channels_dim,
tokens_mlp_dim, channels_mlp_dim,
norm_layer=nn.LayerNorm, epsilon=1e-6):
super().__init__()
self.norm_0 = norm_layer(channels_dim, epsilon=epsilon)
self.token_mixing = MlpBlock(token_dim, tokens_mlp_dim)
self.norm_1 = norm_layer(channels_dim, epsilon=epsilon)
self.channel_mixing = MlpBlock(channels_dim, channels_mlp_dim)
def forward(self, x):
y = self.norm_0(x)
y = y.transpose((0, 2, 1))
y = self.token_mixing(y)
y = y.transpose((0, 2, 1))
x = x + y
y = self.norm_1(x)
y = self.channel_mixing(y)
x = x + y
return x
class MlpMixer(nn.Layer):
def __init__(self, img_size=(224, 224), patch_size=(16, 16),
num_blocks=12, hidden_dim=768,
tokens_mlp_dim=384, channels_mlp_dim=3072,
norm_layer=nn.LayerNorm, epsilon=1e-6,
class_dim=1000):
super().__init__()
self.class_dim = class_dim
self.stem = nn.Conv2D(
3, hidden_dim, kernel_size=patch_size, stride=patch_size)
blocks = [
MixerBlock(
(img_size[0] // patch_size[0]) ** 2,
hidden_dim,
tokens_mlp_dim,
channels_mlp_dim,
norm_layer,
epsilon
) for _ in range(num_blocks)
]
self.blocks = nn.Sequential(*blocks)
self.pre_head_layer_norm = norm_layer(hidden_dim, epsilon=epsilon)
if class_dim > 0:
self.head = nn.Linear(hidden_dim, class_dim)
def forward(self, inputs):
x = self.stem(inputs)
x = x.transpose((0, 2, 3, 1))
x = x.flatten(1, 2)
x = self.blocks(x)
x = self.pre_head_layer_norm(x)
if self.class_dim > 0:
x = x.mean(axis=1)
x = self.head(x)
return x
def mixer_b(pretrained=False, **kwargs):
'''
Model: MLP-mixer-base
Params:
pretrained: load the pretrained model
img_size: input image size
patch_size: patch size
num_classes: number of classes
num_blocks: number of MixerBlock
hidden_dim: dim of hidden
tokens_mlp_dim: dim of tokens_mlp
channels_mlp_dim: dim of channels_mlp
'''
model = MlpMixer(
hidden_dim=768,
num_blocks=12,
tokens_mlp_dim=384,
channels_mlp_dim=3072,
**kwargs
)
if pretrained:
path = paddle.utils.download.get_weights_path_from_url('https://bj.bcebos.com/v1/ai-studio-online/8fcd0b6ba98042d68763bbcbfe96375cbfd97ffed8334ac09787ef73ecf9989f?responseContentDisposition=attachment%3B%20filename%3Dimagenet1k_Mixer-B_16.pdparams')
model.set_dict(paddle.load(path))
return model
def mixer_l(pretrained=False, **kwargs):
'''
Model: MLP-mixer-large
Params:
pretrained: load the pretrained model
img_size: input image size
patch_size: patch size
num_classes: number of classes
num_blocks: number of MixerBlock
hidden_dim: dim of hidden
tokens_mlp_dim: dim of tokens_mlp
channels_mlp_dim: dim of channels_mlp
'''
model = MlpMixer(
hidden_dim=1024,
num_blocks=24,
tokens_mlp_dim=512,
channels_mlp_dim=4096,
**kwargs
)
if pretrained:
path = paddle.utils.download.get_weights_path_from_url('https://bj.bcebos.com/v1/ai-studio-online/ca74ababd4834e34b089c1485989738de4fdf6a97be645ed81b6e39449c5815c?responseContentDisposition=attachment%3B%20filename%3Dimagenet1k_Mixer-L_16.pdparams')
model.set_dict(paddle.load(path))
return model
| en | 0.642697 | Model: MLP-mixer-base Params: pretrained: load the pretrained model img_size: input image size patch_size: patch size num_classes: number of classes num_blocks: number of MixerBlock hidden_dim: dim of hidden tokens_mlp_dim: dim of tokens_mlp channels_mlp_dim: dim of channels_mlp Model: MLP-mixer-large Params: pretrained: load the pretrained model img_size: input image size patch_size: patch size num_classes: number of classes num_blocks: number of MixerBlock hidden_dim: dim of hidden tokens_mlp_dim: dim of tokens_mlp channels_mlp_dim: dim of channels_mlp | 2.755982 | 3 |
Res-34_fruit.py | JerryGCDing/AI | 0 | 6616560 | # -*-coding: utf-8-*-
import numpy as np
import opencv
import random
import tensorflow as tf
import tensorflow.contrib.slim as slim
import os
# Pin the process to GPU 0 and cap its memory use at 50%.
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
file_path = r'D:\Artificial_Intellegence_Project\Practical\Fruit\fruit\Training\\'
# claim variables
# xs: batches of 100x100 RGB images; ys: one-hot labels over 50 classes.
xs = tf.placeholder(tf.float32, [None, 100, 100, 3])
ys = tf.placeholder(tf.float32, [None, 50])
# keep_prob = tf.placeholder(tf.float32)
global_step = tf.Variable(0)
# NOTE(review): xs already has this shape, so the reshape is a passthrough.
x_image = tf.reshape(xs, [-1, 100, 100, 3])
# weight
def weights(shape):
    """Weight Variable initialised from a truncated normal (stddev 0.01)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.01))
# biases
def biases(shape):
    """Bias Variable initialised to the constant 0.02."""
    return tf.Variable(tf.constant(0.02, shape=shape))
# identity layer
def identity(inputs, out_size, k_size, stage, block):
    """Residual identity block: two stride-1 convs plus the unchanged input."""
    shortcut = inputs
    block_name = 'res' + str(stage) + str(block)
    with tf.variable_scope(block_name):
        # First conv -> batch-norm -> ReLU.
        branch = slim.conv2d(inputs, out_size, k_size, stride=1,
                             padding='SAME', activation_fn=None)
        branch = tf.nn.relu(tf.layers.batch_normalization(branch, axis=3))
        # Second conv -> batch-norm; ReLU is applied after the skip add.
        branch = slim.conv2d(branch, out_size, k_size, stride=1,
                             padding='SAME', activation_fn=None)
        branch = tf.layers.batch_normalization(branch, axis=3)
        return tf.nn.relu(branch + shortcut)
# convolution layer
def conv(inputs, out_size, k_size, stage, block):
    """Residual downsampling block: stride-2 conv path + projected shortcut."""
    block_name = 'res' + str(stage) + str(block)
    with tf.variable_scope(block_name):
        # Main path: stride-2 conv -> BN -> ReLU, then stride-1 conv -> BN.
        branch = slim.conv2d(inputs, out_size, k_size, stride=2,
                             padding='SAME', activation_fn=None)
        branch = tf.nn.relu(tf.layers.batch_normalization(branch, axis=3))
        branch = slim.conv2d(branch, out_size, k_size, stride=1,
                             padding='SAME', activation_fn=None)
        branch = tf.layers.batch_normalization(branch, axis=3)
        # Shortcut: project the input with a stride-2 conv so shapes match.
        shortcut = slim.conv2d(inputs, out_size, k_size, stride=2,
                               padding='SAME', activation_fn=None)
        shortcut = tf.layers.batch_normalization(shortcut, axis=3)
        return tf.nn.relu(shortcut + branch)
# stage 1
# ResNet-34-style backbone over the 100x100 RGB input.
conv1 = slim.conv2d(x_image, 64, 7, stride=2, padding='VALID')
conv1_relu = tf.nn.relu(tf.layers.batch_normalization(conv1))
h1_pool = tf.nn.max_pool(conv1_relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# stage 2
id_2_1 = identity(h1_pool, 64, 3, 2, 1)
id_2_2 = identity(id_2_1, 64, 3, 2, 2)
id_2_3 = identity(id_2_2, 64, 3, 2, 3)
# stage 3
conv_3_1 = conv(id_2_3, 128, 3, 3, 1)
id_3_2 = identity(conv_3_1, 128, 3, 3, 2)
id_3_3 = identity(id_3_2, 128, 3, 3, 3)
id_3_4 = identity(id_3_3, 128, 3, 3, 4)
# stage 4
conv_4_1 = conv(id_3_4, 256, 3, 4, 1)
id_4_2 = identity(conv_4_1, 256, 3, 4, 2)
id_4_3 = identity(id_4_2, 256, 3, 4, 3)
id_4_4 = identity(id_4_3, 256, 3, 4, 4)
id_4_5 = identity(id_4_4, 256, 3, 4, 5)
id_4_6 = identity(id_4_5, 256, 3, 4, 6)
# stage 5
conv_5_1 = conv(id_4_6, 512, 3, 5, 1)
id_5_2 = identity(conv_5_1, 512, 3, 5, 2)
id_5_3 = identity(id_5_2, 512, 3, 5, 3)
# fc layer
h_pool = tf.nn.avg_pool(id_5_3, [1, 2, 2, 1], strides=[1, 1, 1, 1], padding='SAME')
# Assumes the spatial map is 3x3 at this point for 100x100 inputs -- TODO confirm.
h_pool_flaten = tf.reshape(h_pool, [-1, 3*3*512])
# stage 6
w_fc1 = weights([3*3*512, 50])
b_fc1 = biases([50])
h_fc1 = tf.matmul(h_pool_flaten, w_fc1)+b_fc1
prediction = tf.nn.softmax(h_fc1)
cross_entropy = -tf.reduce_mean(tf.reduce_sum(ys*tf.log(prediction), reduction_indices=[1]))
# learning rate decay
learning_rate = tf.train.exponential_decay(1e-3, global_step, staircase=True, decay_rate=0.96, decay_steps=20000)
# train step
train_step = tf.train.AdamOptimizer(learning_rate, epsilon=0.1).minimize(cross_entropy, global_step=global_step)
sess.run(tf.global_variables_initializer())
correct_prediction = tf.equal(tf.argmax(ys, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
img, label = opencv.file_name(file_path)
label_op = sess.run(tf.one_hot(label, depth=50))
# Shuffle sample indices once; batches are drawn from this permutation.
index = [i for i in range(len(img))]
random.shuffle(index)
print('training-----------------------------------------------')
for i in range(1500):
    # NOTE(review): tf.assign inside the loop creates a new graph op every
    # iteration, steadily growing the graph; hoisting it would avoid that.
    update = tf.assign(global_step, i)
    # Wrap around after 502 batches so the epoch restarts from batch 0.
    if i >= 502:
        a = i % 502
    else:
        a = i
    img_batch = np.array(img)[index[a*50: a*50+50]]
    label_batch = np.array(label_op[index[a*50: a*50+50]])
    sess.run(train_step, feed_dict={xs: img_batch, ys: label_batch})
    sess.run(update)
    if (i+1) % 10 == 0:
        # Accuracy is measured on the current training batch only.
        print((i+1), sess.run(accuracy, feed_dict={xs: img_batch, ys: label_batch}))
        # save_path = saver_1.save(sess, check_dir, global_step=0)
file_path = r'D:\Artificial_Intellegence_Project\Practical\Fruit\fruit\Test\\'
correct_prediction = tf.equal(tf.argmax(ys, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
img, label = opencv.file_name(file_path)
label_op = sess.run(tf.one_hot(label, depth=50))
print('testing=================================================')
for a in range(169):
    # NOTE(review): 'index' was shuffled for the *training* set; reusing it
    # here assumes the test set has at least as many samples -- verify.
    label_batch = np.array(label_op[index[a*50: a*50+50]])
    img_batch = np.array(img)[index[a*50: a*50+50]]
    if (a+1) % 10 == 0:
        print(sess.run(accuracy, feed_dict={xs: img_batch, ys: label_batch}))
sess.close()
| # -*-coding: utf-8-*-
import numpy as np
import opencv
import random
import tensorflow as tf
import tensorflow.contrib.slim as slim
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
file_path = r'D:\Artificial_Intellegence_Project\Practical\Fruit\fruit\Training\\'
# claim variables
xs = tf.placeholder(tf.float32, [None, 100, 100, 3])
ys = tf.placeholder(tf.float32, [None, 50])
# keep_prob = tf.placeholder(tf.float32)
global_step = tf.Variable(0)
x_image = tf.reshape(xs, [-1, 100, 100, 3])
# weight
def weights(shape):
init = tf.truncated_normal(shape, stddev=0.01)
return tf.Variable(init)
# biases
def biases(shape):
init = tf.constant(0.02, shape=shape)
return tf.Variable(init)
# identity layer
def identity(inputs, out_size, k_size, stage, block):
x_short_cut = inputs
block_name = 'res'+str(stage)+str(block)
with tf.variable_scope(block_name):
# convolution layer 1
conv1 = slim.conv2d(inputs, out_size, k_size, stride=1, padding='SAME', activation_fn=None)
conv1_output = tf.nn.relu(tf.layers.batch_normalization(conv1, axis=3))
# convolution layer 2
conv2 = slim.conv2d(conv1_output, out_size, k_size, stride=1, padding='SAME', activation_fn=None)
conv2_BN = tf.layers.batch_normalization(conv2, axis=3)
conv2_output = tf.nn.relu(conv2_BN+x_short_cut)
return conv2_output
# convolution layer
def conv(inputs, out_size, k_size, stage, block):
x_short_cut = inputs
block_name = 'res'+str(stage)+str(block)
with tf.variable_scope(block_name):
# convolution layer 1
conv1 = slim.conv2d(inputs, out_size, k_size, stride=2, padding='SAME', activation_fn=None)
conv1_output = tf.nn.relu(tf.layers.batch_normalization(conv1, axis=3))
# convolution layer 2
conv2 = slim.conv2d(conv1_output, out_size, k_size, stride=1, padding='SAME', activation_fn=None)
conv2_output = tf.layers.batch_normalization(conv2, axis=3)
# input reshape
input_conv = slim.conv2d(x_short_cut, out_size, k_size, stride=2, padding='SAME', activation_fn=None)
input_reshape = tf.layers.batch_normalization(input_conv, axis=3)
# output
output = tf.nn.relu(input_reshape+conv2_output)
return output
# stage 1
conv1 = slim.conv2d(x_image, 64, 7, stride=2, padding='VALID')
conv1_relu = tf.nn.relu(tf.layers.batch_normalization(conv1))
h1_pool = tf.nn.max_pool(conv1_relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# stage 2
id_2_1 = identity(h1_pool, 64, 3, 2, 1)
id_2_2 = identity(id_2_1, 64, 3, 2, 2)
id_2_3 = identity(id_2_2, 64, 3, 2, 3)
# stage 3
conv_3_1 = conv(id_2_3, 128, 3, 3, 1)
id_3_2 = identity(conv_3_1, 128, 3, 3, 2)
id_3_3 = identity(id_3_2, 128, 3, 3, 3)
id_3_4 = identity(id_3_3, 128, 3, 3, 4)
# stage 4
conv_4_1 = conv(id_3_4, 256, 3, 4, 1)
id_4_2 = identity(conv_4_1, 256, 3, 4, 2)
id_4_3 = identity(id_4_2, 256, 3, 4, 3)
id_4_4 = identity(id_4_3, 256, 3, 4, 4)
id_4_5 = identity(id_4_4, 256, 3, 4, 5)
id_4_6 = identity(id_4_5, 256, 3, 4, 6)
# stage 5
conv_5_1 = conv(id_4_6, 512, 3, 5, 1)
id_5_2 = identity(conv_5_1, 512, 3, 5, 2)
id_5_3 = identity(id_5_2, 512, 3, 5, 3)
# fc layer
h_pool = tf.nn.avg_pool(id_5_3, [1, 2, 2, 1], strides=[1, 1, 1, 1], padding='SAME')
h_pool_flaten = tf.reshape(h_pool, [-1, 3*3*512])
# stage 6
w_fc1 = weights([3*3*512, 50])
b_fc1 = biases([50])
h_fc1 = tf.matmul(h_pool_flaten, w_fc1)+b_fc1
prediction = tf.nn.softmax(h_fc1)
cross_entropy = -tf.reduce_mean(tf.reduce_sum(ys*tf.log(prediction), reduction_indices=[1]))
# learning rate decay
learning_rate = tf.train.exponential_decay(1e-3, global_step, staircase=True, decay_rate=0.96, decay_steps=20000)
# train step
train_step = tf.train.AdamOptimizer(learning_rate, epsilon=0.1).minimize(cross_entropy, global_step=global_step)
sess.run(tf.global_variables_initializer())
correct_prediction = tf.equal(tf.argmax(ys, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
img, label = opencv.file_name(file_path)
label_op = sess.run(tf.one_hot(label, depth=50))
index = [i for i in range(len(img))]
random.shuffle(index)
print('training-----------------------------------------------')
for i in range(1500):
update = tf.assign(global_step, i)
if i >= 502:
a = i % 502
else:
a = i
img_batch = np.array(img)[index[a*50: a*50+50]]
label_batch = np.array(label_op[index[a*50: a*50+50]])
sess.run(train_step, feed_dict={xs: img_batch, ys: label_batch})
sess.run(update)
if (i+1) % 10 == 0:
print((i+1), sess.run(accuracy, feed_dict={xs: img_batch, ys: label_batch}))
# save_path = saver_1.save(sess, check_dir, global_step=0)
file_path = r'D:\Artificial_Intellegence_Project\Practical\Fruit\fruit\Test\\'
correct_prediction = tf.equal(tf.argmax(ys, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
img, label = opencv.file_name(file_path)
label_op = sess.run(tf.one_hot(label, depth=50))
print('testing=================================================')
for a in range(169):
label_batch = np.array(label_op[index[a*50: a*50+50]])
img_batch = np.array(img)[index[a*50: a*50+50]]
if (a+1) % 10 == 0:
print(sess.run(accuracy, feed_dict={xs: img_batch, ys: label_batch}))
sess.close()
| en | 0.47721 | # -*-coding: utf-8-*- # claim variables # keep_prob = tf.placeholder(tf.float32) # weight # biases # identity layer # convolution layer 1 # convolution layer 2 # convolution layer # convolution layer 1 # convolution layer 2 # input reshape # output # stage 1 # stage 2 # stage 3 # stage 4 # stage 5 # fc layer # stage 6 # learning rate decay # train step # save_path = saver_1.save(sess, check_dir, global_step=0) | 2.40156 | 2 |
ensysmod/crud/dataset.py | NOWUM/EnSysMod | 1 | 6616561 | from typing import Optional, Union, Any
from sqlalchemy.orm import Session
from ensysmod.crud.base import CRUDBase
from ensysmod.crud.dataset_permission import dataset_permission
from ensysmod.model import Dataset
from ensysmod.schemas import DatasetCreate, DatasetUpdate, DatasetPermissionCreate
# noinspection PyMethodMayBeStatic,PyArgumentList
class CRUDDataset(CRUDBase[Dataset, DatasetCreate, DatasetUpdate]):
    """CRUD operations for :class:`Dataset`.

    Creating a dataset also grants its creator full permissions; removing a
    dataset first deletes the permission rows that reference it.
    """

    def create(self, db: Session, *, obj_in: Union[DatasetCreate, Dataset, dict]) -> Dataset:
        created: Dataset = super().create(db, obj_in=obj_in)
        # The creator receives every permission on the new dataset.
        dataset_permission.create(db, obj_in=DatasetPermissionCreate(
            ref_dataset=created.id,
            ref_user=created.ref_created_by,
            allow_usage=True,
            allow_modification=True,
            allow_permission_grant=True,
            allow_permission_revoke=True,
        ))
        return created

    def get_by_name(self, db: Session, *, name: str) -> Optional[Dataset]:
        """Return the dataset with the given name, or ``None``."""
        return db.query(Dataset).filter(Dataset.name == name).first()

    def remove(self, db: Session, *, id: Any) -> Dataset:
        # Permission rows reference the dataset, so drop them first.
        dataset_permission.remove_by_dataset(db, dataset_id=id)
        return super().remove(db, id=id)


dataset = CRUDDataset(Dataset)
| from typing import Optional, Union, Any
from sqlalchemy.orm import Session
from ensysmod.crud.base import CRUDBase
from ensysmod.crud.dataset_permission import dataset_permission
from ensysmod.model import Dataset
from ensysmod.schemas import DatasetCreate, DatasetUpdate, DatasetPermissionCreate
# noinspection PyMethodMayBeStatic,PyArgumentList
class CRUDDataset(CRUDBase[Dataset, DatasetCreate, DatasetUpdate]):
"""
CRUD operations for Dataset
"""
def create(self, db: Session, *, obj_in: Union[DatasetCreate, Dataset, dict]) -> Dataset:
new_dataset: Dataset = super().create(db, obj_in=obj_in)
# Add permission for creator
creator_permission = DatasetPermissionCreate(
ref_dataset=new_dataset.id,
ref_user=new_dataset.ref_created_by,
allow_usage=True,
allow_modification=True,
allow_permission_grant=True,
allow_permission_revoke=True,
)
dataset_permission.create(db, obj_in=creator_permission)
return new_dataset
def get_by_name(self, db: Session, *, name: str) -> Optional[Dataset]:
return db.query(Dataset).filter(Dataset.name == name).first()
def remove(self, db: Session, *, id: Any) -> Dataset:
dataset_permission.remove_by_dataset(db, dataset_id=id)
return super().remove(db, id=id)
dataset = CRUDDataset(Dataset)
| en | 0.403214 | # noinspection PyMethodMayBeStatic,PyArgumentList CRUD operations for Dataset # Add permission for creator | 2.308282 | 2 |
env/Lib/site-packages/OpenGL/GL/ARB/derivative_control.py | 5gconnectedbike/Navio2 | 210 | 6616562 | <gh_stars>100-1000
'''OpenGL extension ARB.derivative_control
This module customises the behaviour of the
OpenGL.raw.GL.ARB.derivative_control to provide a more
Python-friendly API
Overview (from the spec)
This extension provides control over the spacial granularity at which the
underlying implementation computes derivatives.
For example, for the coarse-granularity derivative, a single x derivative
could be computed for each 2x2 group of pixels, using that same derivative
value for all 4 pixels. For the fine-granularity derivative, two
derivatives could be computed for each 2x2 group of pixels; one for the top
row and one for the bottom row. Implementations vary somewhat on how this
is done.
To select the coarse derivative, use:
dFdxCoarse(p)
dFdyCoarse(p)
fwidthCoarse(p)
To select the fine derivative, use:
dFdxFine(p)
dFdyFine(p)
fwidthFine(p)
To select which ever is "better" (based on performance, API hints, or other
factors), use:
dFdx(p)
dFdy(p)
fwidth(p)
This last set is the set of previously existing built-ins for derivatives,
and continues to work in a backward compatible way.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/derivative_control.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.derivative_control import *
from OpenGL.raw.GL.ARB.derivative_control import _EXTENSION_NAME
def glInitDerivativeControlARB():
    """Return True if the GL_ARB_derivative_control extension is available."""
    from OpenGL import extensions
    available = extensions.hasGLExtension(_EXTENSION_NAME)
    return available
### END AUTOGENERATED SECTION | '''OpenGL extension ARB.derivative_control
This module customises the behaviour of the
OpenGL.raw.GL.ARB.derivative_control to provide a more
Python-friendly API
Overview (from the spec)
This extension provides control over the spacial granularity at which the
underlying implementation computes derivatives.
For example, for the coarse-granularity derivative, a single x derivative
could be computed for each 2x2 group of pixels, using that same derivative
value for all 4 pixels. For the fine-granularity derivative, two
derivatives could be computed for each 2x2 group of pixels; one for the top
row and one for the bottom row. Implementations vary somewhat on how this
is done.
To select the coarse derivative, use:
dFdxCoarse(p)
dFdyCoarse(p)
fwidthCoarse(p)
To select the fine derivative, use:
dFdxFine(p)
dFdyFine(p)
fwidthFine(p)
To select which ever is "better" (based on performance, API hints, or other
factors), use:
dFdx(p)
dFdy(p)
fwidth(p)
This last set is the set of previously existing built-ins for derivatives,
and continues to work in a backward compatible way.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/derivative_control.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.derivative_control import *
from OpenGL.raw.GL.ARB.derivative_control import _EXTENSION_NAME
def glInitDerivativeControlARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | en | 0.828179 | OpenGL extension ARB.derivative_control This module customises the behaviour of the OpenGL.raw.GL.ARB.derivative_control to provide a more Python-friendly API Overview (from the spec) This extension provides control over the spacial granularity at which the underlying implementation computes derivatives. For example, for the coarse-granularity derivative, a single x derivative could be computed for each 2x2 group of pixels, using that same derivative value for all 4 pixels. For the fine-granularity derivative, two derivatives could be computed for each 2x2 group of pixels; one for the top row and one for the bottom row. Implementations vary somewhat on how this is done. To select the coarse derivative, use: dFdxCoarse(p) dFdyCoarse(p) fwidthCoarse(p) To select the fine derivative, use: dFdxFine(p) dFdyFine(p) fwidthFine(p) To select which ever is "better" (based on performance, API hints, or other factors), use: dFdx(p) dFdy(p) fwidth(p) This last set is the set of previously existing built-ins for derivatives, and continues to work in a backward compatible way. The official definition of this extension is available here: http://www.opengl.org/registry/specs/ARB/derivative_control.txt Return boolean indicating whether this extension is available ### END AUTOGENERATED SECTION | 2.052771 | 2 |
bareasgi/http/http_callbacks.py | rob-blackbourn/bareasgi | 15 | 6616563 | <reponame>rob-blackbourn/bareasgi<filename>bareasgi/http/http_callbacks.py
"""The http callbacks"""
from typing import Awaitable, Callable
from .http_request import HttpRequest
from .http_response import HttpResponse
# An HTTP handler: takes the request and resolves to the response.
HttpRequestCallback = Callable[
    [HttpRequest],
    Awaitable[HttpResponse]
]
# Middleware: takes the request plus the downstream handler to delegate to.
HttpMiddlewareCallback = Callable[
    [HttpRequest, HttpRequestCallback],
    Awaitable[HttpResponse]
]
| """The http callbacks"""
from typing import Awaitable, Callable
from .http_request import HttpRequest
from .http_response import HttpResponse
HttpRequestCallback = Callable[
[HttpRequest],
Awaitable[HttpResponse]
]
HttpMiddlewareCallback = Callable[
[HttpRequest, HttpRequestCallback],
Awaitable[HttpResponse]
] | en | 0.493171 | The http callbacks | 1.78432 | 2 |
grpc_opentracing/grpc_interceptor/utils.py | zhyon404/python-grpc | 0 | 6616564 | from grpc_opentracing import scope
class _LoggingIterator(object):
    """Iterator wrapper that logs each yielded message to a tracing span."""

    def __init__(self, key, iterator, span):
        self._key = key
        self._iterator = iterator
        self._span = span

    def __iter__(self):
        return self

    def __next__(self):
        message = next(self._iterator)
        self._span.log_kv({self._key: message})
        return message

    # Python 2 compatibility alias.
    next = __next__
def log_or_wrap_request_or_iterator(span, is_client_stream,
                                    request_or_iterator):
    """Log a unary request immediately, or wrap a streaming one for lazy logging."""
    if not is_client_stream:
        span.log_kv({'request': request_or_iterator})
        return request_or_iterator
    return _LoggingIterator('request', request_or_iterator, span)
def log_or_wrap_response_or_iterator(span, is_service_stream,
                                     response_or_iterator):
    """Log a unary response immediately, or wrap a streaming one for lazy logging."""
    if not is_service_stream:
        span.log_kv({'response': response_or_iterator})
        return response_or_iterator
    return _LoggingIterator('response', response_or_iterator, span)
def wrap_iter_with_end_span(response_iter):
    """Yield every response, then end the active span once exhausted."""
    for message in response_iter:
        yield message
    scope.end_span()
| from grpc_opentracing import scope
class _LoggingIterator(object):
def __init__(self, key, iterator, span):
self._key = key
self._iterator = iterator
self._span = span
def __iter__(self):
return self
def next(self):
request = next(self._iterator)
self._span.log_kv({self._key: request})
return request
def __next__(self):
return self.next()
def log_or_wrap_request_or_iterator(span, is_client_stream,
request_or_iterator):
if is_client_stream:
return _LoggingIterator('request', request_or_iterator, span)
else:
span.log_kv({'request': request_or_iterator})
return request_or_iterator
def log_or_wrap_response_or_iterator(span, is_service_stream,
response_or_iterator):
if is_service_stream:
return _LoggingIterator('response', response_or_iterator, span)
else:
span.log_kv({'response': response_or_iterator})
return response_or_iterator
def wrap_iter_with_end_span(response_iter):
for response in response_iter:
yield response
scope.end_span()
| none | 1 | 2.420649 | 2 | |
resolvr/resolvr.py | remerjohnson/resolvr | 2 | 6616565 | <gh_stars>1-10
#!/usr/bin/python
# coding=UTF-8
#
# Resolvr: A Wikidata Querying App
#
import os
from flask import *
import pandas as pd
from SPARQLWrapper import SPARQLWrapper, JSON
from werkzeug.utils import secure_filename
# Declare and load the app
app = Flask(__name__)  # module-level Flask application; routes are registered below
# Upload extensions the app accepts (compared lower-cased).
# NOTE(review): 'xsl'/'xslx' look like typos for the Excel extensions
# 'xls'/'xlsx' -- confirm the intended formats.
ALLOWED_EXTENSIONS = {'csv', 'xsl', 'xslx'}

def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/')
def home(name=None):
    """Render the landing page."""
    return render_template('home.html', name=name)
@app.route('/single-query/viaf', methods=['POST', 'GET'])
def single_query_viaf():
    """Query Wikidata for a person by VIAF identifier (property P214).

    On POST, reads ``id_entry`` from the form, runs a SPARQL query against
    the Wikidata endpoint and renders the matches as an HTML table. On GET
    (or when no ID was entered) an empty results table is rendered.
    """
    errors = []
    importantPeople = []
    # Bug fix: id_entry was previously only assigned inside the POST branch,
    # so a plain GET request raised NameError at "if id_entry:".
    id_entry = None
    sparql = SPARQLWrapper("https://query.wikidata.org/sparql")
    if request.method == 'POST':
        # get IDs that the user has input
        try:
            id_entry = request.form['id_entry']
        except KeyError:  # was a bare except; only a missing form field is expected here
            errors.append('Sorry. Unable to get ID')
            return render_template("/single-query/viaf.html", errors = errors)
    if id_entry:
        # Escape backslashes and double quotes so user input cannot break out
        # of the SPARQL string literal (injection hardening).
        safe_id = id_entry.replace('\\', '\\\\').replace('"', '\\"')
        queryString = 'SELECT ?person ?personLabel WHERE { ?person wdt:P214 "' + safe_id + '" SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }}'
        try:
            sparql.setQuery(queryString)
            sparql.setReturnFormat(JSON)
            results = sparql.query().convert()
            for result in results["results"]["bindings"]:
                importantPeople.append({
                    'ID entered': id_entry,
                    'Wikidata URI': result["person"]["value"],
                    'Name': result["personLabel"]["value"]
                })
        except Exception:  # endpoint/network/parse failures; was a bare except
            errors.append('Sorry, something went wrong')
    df1 = pd.DataFrame(importantPeople)
    return render_template("/single-query/viaf.html", errors=errors, tables=[df1.to_html(classes='table table-striped')], title = 'Results:')
@app.route('/batch_query')
def batch_query(name=None):
    """Render the batch-query upload page."""
    return render_template('batch_query.html', name=name)
@app.route('/data_view', methods=['POST'])
def data_view():
    """Render an uploaded CSV file as an HTML table.

    Reads the 'data_file' upload from the POST body (presumably a werkzeug
    FileStorage -- confirm) and displays it via pandas.
    NOTE(review): the upload is not checked with allowed_file() and
    pd.read_csv errors are unhandled -- confirm that is intended.
    """
    file = request.files['data_file']
    if not file:
        return "No file"
    df = pd.read_csv(file)
    return render_template("/file_analysis.html", tables=[df.to_html(classes='table table-striped')], title = 'Your data:')
if __name__ == '__main__':
    # Start the Flask development server (debug reloader on; not for production).
    app.run(debug=True)
| #!/usr/bin/python
# coding=UTF-8
#
# Resolvr: A Wikidata Querying App
#
import os
from flask import *
import pandas as pd
from SPARQLWrapper import SPARQLWrapper, JSON
from werkzeug.utils import secure_filename
# Declare and load the app
app = Flask(__name__)
ALLOWED_EXTENSIONS = set(['csv', 'xsl', 'xslx'])
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/')
def home(name=None):
return render_template('home.html', name=name)
@app.route('/single-query/viaf', methods=['POST', 'GET'])
def single_query_viaf():
errors = []
results = []
importantPeople = []
sparql = SPARQLWrapper("https://query.wikidata.org/sparql")
if request.method == 'POST':
# get IDs that the user has input
try:
id_entry = request.form['id_entry']
except:
errors.append('Sorry. Unable to get ID')
return render_template("/single-query/viaf.html", errors = errors)
if id_entry:
queryString = 'SELECT ?person ?personLabel WHERE { ?person wdt:P214 "' + id_entry + '" SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }}'
try:
sparql.setQuery(queryString)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
importantPeople.append({
'ID entered': id_entry,
'Wikidata URI': result["person"]["value"],
'Name': result["personLabel"]["value"]
})
except:
errors.append('Sorry, something went wrong')
df1 = pd.DataFrame(importantPeople)
return render_template("/single-query/viaf.html", errors=errors, tables=[df1.to_html(classes='table table-striped')], title = 'Results:')
@app.route('/batch_query')
def batch_query(name=None):
return render_template('batch_query.html', name=name)
@app.route('/data_view', methods=['POST'])
def data_view():
file = request.files['data_file']
if not file:
return "No file"
df = pd.read_csv(file)
return render_template("/file_analysis.html", tables=[df.to_html(classes='table table-striped')], title = 'Your data:')
if __name__ == '__main__':
app.run(debug=True) | en | 0.691384 | #!/usr/bin/python # coding=UTF-8 # # Resolvr: A Wikidata Querying App # # Declare and load the app # get IDs that the user has input | 2.942128 | 3 |
findingpercentage.py | codejourney1111/h1python | 0 | 6616566 | <filename>findingpercentage.py
"""
You have a record of students. Each record contains the student's name, and their percent marks in Maths, Physics and Chemistry. The marks can be floating values. The user enters some integer followed by the names and marks for students. You are required to save the record in a dictionary data type. The user then enters a student's name. Output the average percentage marks obtained by that student, correct to two decimal places.
Input Format
The first line contains the integer , the number of students. The next lines contains the name and marks obtained by that student separated by a space. The final line contains the name of a particular student previously listed.
Constraints
Output Format
Print one line: The average of the marks obtained by the particular student correct to 2 decimal places.
"""
if __name__ == '__main__':
    # Python 2/3 compatibility: raw_input() was renamed to input() in Python 3.
    try:
        _input = raw_input
    except NameError:
        _input = input
    # Read n student records of the form "name m1 m2 m3 ..." into a dict.
    n = int(_input())
    student_marks = {}
    for _ in range(n):
        line = _input().split()
        name, scores = line[0], line[1:]
        # list() is required on Python 3, where map() returns a lazy object
        # that has no len() -- the original code broke at len(query_scores).
        scores = list(map(float, scores))
        student_marks[name] = scores
    # Print the queried student's average mark to two decimal places.
    query_name = _input()
    query_scores = student_marks[query_name]
    print("{0:.2f}".format(sum(query_scores)/(len(query_scores))))
"""
You have a record of students. Each record contains the student's name, and their percent marks in Maths, Physics and Chemistry. The marks can be floating values. The user enters some integer followed by the names and marks for students. You are required to save the record in a dictionary data type. The user then enters a student's name. Output the average percentage marks obtained by that student, correct to two decimal places.
Input Format
The first line contains the integer , the number of students. The next lines contains the name and marks obtained by that student separated by a space. The final line contains the name of a particular student previously listed.
Constraints
Output Format
Print one line: The average of the marks obtained by the particular student correct to 2 decimal places.
"""
if __name__ == '__main__':
n = int(raw_input())
student_marks = {}
for _ in range(n):
line = raw_input().split()
name, scores = line[0], line[1:]
scores = map(float, scores)
student_marks[name] = scores
query_name = raw_input()
query_scores = student_marks[query_name]
print("{0:.2f}".format(sum(query_scores)/(len(query_scores)))) | en | 0.918756 | You have a record of students. Each record contains the student's name, and their percent marks in Maths, Physics and Chemistry. The marks can be floating values. The user enters some integer followed by the names and marks for students. You are required to save the record in a dictionary data type. The user then enters a student's name. Output the average percentage marks obtained by that student, correct to two decimal places. Input Format The first line contains the integer , the number of students. The next lines contains the name and marks obtained by that student separated by a space. The final line contains the name of a particular student previously listed. Constraints Output Format Print one line: The average of the marks obtained by the particular student correct to 2 decimal places. | 4.043019 | 4 |
solutions.py | abdu1aziz/jamf-pro-scripts | 0 | 6616567 | <gh_stars>0
import time
print("SUCESS!!")
| import time
print("SUCESS!!") | none | 1 | 1.38148 | 1 | |
tests/test_controller_action_renderers.py | zeraien/django-url-framework | 15 | 6616568 | <filename>tests/test_controller_action_renderers.py
import json
import unittest
from io import BytesIO
import yaml
from django.http import HttpRequest
from django.test import RequestFactory, SimpleTestCase
from django.utils.encoding import force_bytes
from .duf_test_case import DUFTestCase
from django_url_framework.decorators import auto, json_action, yaml_action
from django_url_framework.controller import ActionController
class TestController(DUFTestCase):
    """Integration tests for ActionController's response renderers.

    Covers the default HTML template renderer, the @auto content-negotiation
    decorator, @json_action/@yaml_action, _print/_as_json/_as_yaml helpers,
    before/after filters, redirects via _go(), and (payload, status_code)
    tuple returns.
    """
    def test_default_renderer_template(self):
        """Without a decorator the action renders through the HTML template."""
        action_response = {'data':'foo'}
        class TestTemplateRendererController(ActionController):
            def test_action(self, request):
                return action_response
        response = self._request_and_test(TestTemplateRendererController, "test_action",
                                          expected_response="HTML:{data}".format(**action_response))
        self.assertEqual(response['Content-Type'],"text/html; charset=utf-8")
    def test_template_renderer_adds_request_to_template_context(self):
        """The template context automatically includes the WSGI request object."""
        action_response = {'data':'foo'}
        class TestTemplateRendererAddsRequestController(ActionController):
            def test_has_request(self, request):
                return action_response
        response = self._request_and_test(TestTemplateRendererAddsRequestController, "test_has_request",
                                          expected_response="This template <WSGIRequest: GET '/test/json/'>")
        self.assertEqual(response['Content-Type'],"text/html; charset=utf-8")
    def test_auto_json_yaml_str(self):
        """@auto() picks JSON/YAML/plain-text rendering from the Accept header,
        honoring the order of preference when several types are listed."""
        expected = {'ab':"C",1:"2",None:False}
        yaml_flow = True
        def _run_test(accept, expect, **kwargs):
            # yaml_flow is captured at class-creation time by the class attribute.
            class TestTestController(ActionController):
                yaml_default_flow_style=yaml_flow
                @auto()
                def test_action(self, request):
                    return expected
            self._request_and_test(TestTestController, "test_action", expected_response=expect, HTTP_ACCEPT=accept)
        _run_test("application/json", json.dumps(expected))
        _run_test("application/yaml", yaml.dump(expected, default_flow_style=yaml_flow).strip())
        yaml_flow = False
        _run_test("application/yaml", yaml.dump(expected, default_flow_style=False), flow_style=yaml_flow)
        _run_test("application/yaml, application/json", yaml.dump(expected, default_flow_style=False), flow_style=yaml_flow)
        _run_test(["application/yaml","application/json"], yaml.dump(expected, default_flow_style=False), flow_style=yaml_flow)
        _run_test("application/json, application/yaml", json.dumps(expected))
        _run_test("text/plain", "{None: False, 1: '2', 'ab': 'C'}")
    def test_auto_decorator_with_params(self):
        """@auto accepts renderer options such as yaml_default_flow_style."""
        expected = {'ab':"C",1:"2",None:False}
        class TestDecoratorWithParamsController(ActionController):
            @auto(yaml_default_flow_style=True)
            def test_action(self, request):
                return expected
        self._request_and_test(TestDecoratorWithParamsController, "test_action",
                               HTTP_ACCEPT="application/yaml",
                               expected_response=yaml.dump(expected,default_flow_style=True))
    def test_json_decorator(self):
        """@json_action renders the returned dict as JSON regardless of Accept."""
        expected = {'ab':"C",1:"2",None:False}
        class TestJSONDecoratorController(ActionController):
            @json_action()
            def test_action(self, request):
                return expected
        self._request_and_test(TestJSONDecoratorController, "test_action", expected_response=json.dumps(expected))
    def test_before_filter_redirect(self):
        """A redirect returned from _before_filter short-circuits the action."""
        returned = {"foo":"bar"}
        class TestPrintController(ActionController):
            def _before_filter(self, request):
                return self._go(to_url="/baz/")
            @json_action()
            def test_action(self, request):
                return returned
        response = self._request_and_test(TestPrintController, "test_action", status_code=302)
        self.assertEqual(response['Location'], "/baz/")
    def test_before_filter_none(self):
        """A _before_filter returning None leaves the action's response unchanged."""
        returned = {"foo":"bar"}
        class TestPrintController(ActionController):
            def _before_filter(self, request):
                return None
            @json_action()
            def test_action(self, request):
                return returned
        self._request_and_test(TestPrintController, "test_action", expected_response=json.dumps(returned))
    def test_before_filter_dict(self):
        """A dict returned from _before_filter is merged into the action's context."""
        returned = {"foo":"bar"}
        class TestPrintController(ActionController):
            def _before_filter(self, request):
                return {"add":123}
            @json_action()
            def test_action(self, request):
                return returned
        self._request_and_test(TestPrintController, "test_action", expected_response=json.dumps({"foo":"bar", "add":123}))
    def test_print(self):
        """_print() renders str(obj) for lists, strings and dicts."""
        expected = [1,2,3,4,5]
        def _run_test(input, expect, **kwargs):
            class TestPrintController(ActionController):
                def test_action(self, request):
                    return self._print(input)
            self._request_and_test(TestPrintController, "test_action", expected_response=expect)
        _run_test(expected, str(expected))
        _run_test("Bajs", "Bajs")
        _run_test({"a":"b"}, str({"a":"b"}))
    def test_as_yaml(self):
        """_as_yaml() renders the payload as YAML with the given flow style."""
        input = {'ab':"C",1:"2",None:False}
        class TestAsYamlController(ActionController):
            def test_action(self, request):
                return self._as_yaml(input, default_flow_style=True)
        self._request_and_test(TestAsYamlController, "test_action", expected_response=yaml.dump(input, default_flow_style=True))
    def test_as_json(self):
        """_as_json() renders the payload as JSON."""
        input = {'ab':"C",1:"2",None:False}
        class TestAsJsonController(ActionController):
            def test_action(self, request):
                return self._as_json(input)
        self._request_and_test(TestAsJsonController, "test_action", expected_response=json.dumps(input))
    def test_redirect_action(self):
        """_go() issues a 302 by default and a 301 when permanent=True."""
        class RedirectController(ActionController):
            @json_action()
            def second_action(self, request):
                return {}
            def redirect(self, request):
                return self._go(to_url="/temporary/")
            def redirect_permanent(self, request):
                return self._go(to_url="/permanent/", permanent=True)
        rf = RequestFactory()
        request = rf.get('/redirecting/')
        controller = RedirectController(site=None, request=request, helper_class=None, url_params=None)
        with self.subTest('302'):
            response = controller._call_action('redirect')
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response['Location'], "/temporary/")
        with self.subTest('301'):
            response = controller._call_action('redirect_permanent')
            self.assertEqual(response.status_code, 301)
            self.assertEqual(response['Location'], "/permanent/")
    def test_yaml_decorator(self):
        """@yaml_action uses the controller-level yaml_default_flow_style."""
        expected = {'ab':"C",1:"2",None:False}
        class TestYamlDecoratorController(ActionController):
            yaml_default_flow_style=True
            @yaml_action()
            def test_action(self, request):
                return expected
        self._request_and_test(TestYamlDecoratorController, "test_action", expected_response=yaml.dump(expected,default_flow_style=True))
    def test_yaml_decorator_with_flow_style(self):
        """@yaml_action(default_flow_style=True) sets flow style per-action."""
        expected = {'ab':"C",1:"2",None:False}
        class TestYamlWithFlowController(ActionController):
            @yaml_action(default_flow_style=True)
            def test_action(self, request):
                return expected
        self._request_and_test(TestYamlWithFlowController, "test_action", expected_response=yaml.dump(expected,default_flow_style=True))
    def test_yaml_decorator_with_flow_style_false(self):
        """@yaml_action(default_flow_style=False) renders block-style YAML."""
        input = {'ab':"C",1:"2",None:False}
        class TestYamlDecoWithFalseFlowController(ActionController):
            @yaml_action(default_flow_style=False)
            def test_action(self, request):
                return input
        self._request_and_test(TestYamlDecoWithFalseFlowController, "test_action", expected_response=yaml.dump(input,default_flow_style=False))
    def test_after_filter(self):
        """A dict returned from _after_filter is merged into the response context."""
        input = {'ab':"C",1:"2",None:False}
        after = {'c':'z'}
        class TestAfterFilterController(ActionController):
            def _after_filter(self, request):
                return after
            @json_action()
            def test_action(self, request):
                return input
        copied = input.copy()
        copied.update(after)
        self._request_and_test(
            TestAfterFilterController,
            "test_action",
            expected_response=json.dumps(copied)
        )
    def test_after_filter_can_access_context(self):
        """
        This verifies that `_after_filter` is run, that it has access to the
        context that was returned by an `action`, and that `_after_filter` can modify
        the context before returning it to the client.
        after_filter takes value `foo` from our dictionary,
        and assigns it to key `bar`. It should also replace the original
        `foo` value with `bazinga`
        :return:
        """
        input = {'foo':"123"}
        class TestAfterFilterContextController(ActionController):
            def _after_filter(self, request):
                after = {
                    'bar': self._template_context['foo'],
                    'foo': 'bazinga'
                }
                return after
            @json_action()
            def test_action(self, request):
                return input
        self._request_and_test(
            TestAfterFilterContextController,
            "test_action",
            expected_response=json.dumps({"foo":'bazinga',"bar":"123"}))
    def test_tuple_response_status_code(self):
        """Returning (payload, status) from a decorated action sets the status code."""
        expected = "HAIHAIHAI"
        class TupleController(ActionController):
            @json_action()
            def three_three(self, request):
                return expected, 333
        rf = RequestFactory()
        request = rf.get('/three_three/')
        controller = TupleController(site=None, request=request, helper_class=None, url_params=None)
        response = controller._call_action('three_three')
        self.assertEqual(response.status_code, 333)
        self.assertEqual(response.content.decode('utf8'), json.dumps(expected))
    def test_as_json_tuple_response_status_code(self):
        """(self._as_json(x), status) applies the tuple's status code."""
        expected = "HAIHAIHAI"
        class TupleController(ActionController):
            def three_three(self, request):
                return self._as_json(expected), 333
        self._request_and_test(TupleController, "three_three", json.dumps(expected), 333)
    def test_as_json_param_response_status_code(self):
        """_as_json(status_code=...) sets the response status directly."""
        expected = "HAIHAIHAI"
        class TupleController(ActionController):
            def three_three(self, request):
                return self._as_json(expected, status_code=333)
        self._request_and_test(TupleController, "three_three", json.dumps(expected), 333)
    def test_param_tuple_status_code(self):
        """(self._print(x), status) applies the tuple's status code."""
        expected = "HAIHAIHAI"
        class TupleController(ActionController):
            def three_three(self, request):
                return self._print(expected),334
        self._request_and_test(TupleController, "three_three", expected, 334)
    def test_as_json_param_and_tuple_response_status_code(self):
        """An explicit status_code= passed to _as_json wins over the tuple status."""
        expected = "HAIHAIHAI"
        class TupleController(ActionController):
            def three_three(self, request):
                return self._as_json(expected, status_code=333), 444
        self._request_and_test(TupleController, "three_three", json.dumps(expected), 333)
| <filename>tests/test_controller_action_renderers.py
import json
import unittest
from io import BytesIO
import yaml
from django.http import HttpRequest
from django.test import RequestFactory, SimpleTestCase
from django.utils.encoding import force_bytes
from .duf_test_case import DUFTestCase
from django_url_framework.decorators import auto, json_action, yaml_action
from django_url_framework.controller import ActionController
class TestController(DUFTestCase):
def test_default_renderer_template(self):
action_response = {'data':'foo'}
class TestTemplateRendererController(ActionController):
def test_action(self, request):
return action_response
response = self._request_and_test(TestTemplateRendererController, "test_action",
expected_response="HTML:{data}".format(**action_response))
self.assertEqual(response['Content-Type'],"text/html; charset=utf-8")
def test_template_renderer_adds_request_to_template_context(self):
action_response = {'data':'foo'}
class TestTemplateRendererAddsRequestController(ActionController):
def test_has_request(self, request):
return action_response
response = self._request_and_test(TestTemplateRendererAddsRequestController, "test_has_request",
expected_response="This template <WSGIRequest: GET '/test/json/'>")
self.assertEqual(response['Content-Type'],"text/html; charset=utf-8")
def test_auto_json_yaml_str(self):
expected = {'ab':"C",1:"2",None:False}
yaml_flow = True
def _run_test(accept, expect, **kwargs):
class TestTestController(ActionController):
yaml_default_flow_style=yaml_flow
@auto()
def test_action(self, request):
return expected
self._request_and_test(TestTestController, "test_action", expected_response=expect, HTTP_ACCEPT=accept)
_run_test("application/json", json.dumps(expected))
_run_test("application/yaml", yaml.dump(expected, default_flow_style=yaml_flow).strip())
yaml_flow = False
_run_test("application/yaml", yaml.dump(expected, default_flow_style=False), flow_style=yaml_flow)
_run_test("application/yaml, application/json", yaml.dump(expected, default_flow_style=False), flow_style=yaml_flow)
_run_test(["application/yaml","application/json"], yaml.dump(expected, default_flow_style=False), flow_style=yaml_flow)
_run_test("application/json, application/yaml", json.dumps(expected))
_run_test("text/plain", "{None: False, 1: '2', 'ab': 'C'}")
def test_auto_decorator_with_params(self):
expected = {'ab':"C",1:"2",None:False}
class TestDecoratorWithParamsController(ActionController):
@auto(yaml_default_flow_style=True)
def test_action(self, request):
return expected
self._request_and_test(TestDecoratorWithParamsController, "test_action",
HTTP_ACCEPT="application/yaml",
expected_response=yaml.dump(expected,default_flow_style=True))
def test_json_decorator(self):
expected = {'ab':"C",1:"2",None:False}
class TestJSONDecoratorController(ActionController):
@json_action()
def test_action(self, request):
return expected
self._request_and_test(TestJSONDecoratorController, "test_action", expected_response=json.dumps(expected))
def test_before_filter_redirect(self):
returned = {"foo":"bar"}
class TestPrintController(ActionController):
def _before_filter(self, request):
return self._go(to_url="/baz/")
@json_action()
def test_action(self, request):
return returned
response = self._request_and_test(TestPrintController, "test_action", status_code=302)
self.assertEqual(response['Location'], "/baz/")
def test_before_filter_none(self):
returned = {"foo":"bar"}
class TestPrintController(ActionController):
def _before_filter(self, request):
return None
@json_action()
def test_action(self, request):
return returned
self._request_and_test(TestPrintController, "test_action", expected_response=json.dumps(returned))
def test_before_filter_dict(self):
returned = {"foo":"bar"}
class TestPrintController(ActionController):
def _before_filter(self, request):
return {"add":123}
@json_action()
def test_action(self, request):
return returned
self._request_and_test(TestPrintController, "test_action", expected_response=json.dumps({"foo":"bar", "add":123}))
def test_print(self):
expected = [1,2,3,4,5]
def _run_test(input, expect, **kwargs):
class TestPrintController(ActionController):
def test_action(self, request):
return self._print(input)
self._request_and_test(TestPrintController, "test_action", expected_response=expect)
_run_test(expected, str(expected))
_run_test("Bajs", "Bajs")
_run_test({"a":"b"}, str({"a":"b"}))
def test_as_yaml(self):
input = {'ab':"C",1:"2",None:False}
class TestAsYamlController(ActionController):
def test_action(self, request):
return self._as_yaml(input, default_flow_style=True)
self._request_and_test(TestAsYamlController, "test_action", expected_response=yaml.dump(input, default_flow_style=True))
def test_as_json(self):
input = {'ab':"C",1:"2",None:False}
class TestAsJsonController(ActionController):
def test_action(self, request):
return self._as_json(input)
self._request_and_test(TestAsJsonController, "test_action", expected_response=json.dumps(input))
def test_redirect_action(self):
class RedirectController(ActionController):
@json_action()
def second_action(self, request):
return {}
def redirect(self, request):
return self._go(to_url="/temporary/")
def redirect_permanent(self, request):
return self._go(to_url="/permanent/", permanent=True)
rf = RequestFactory()
request = rf.get('/redirecting/')
controller = RedirectController(site=None, request=request, helper_class=None, url_params=None)
with self.subTest('302'):
response = controller._call_action('redirect')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], "/temporary/")
with self.subTest('301'):
response = controller._call_action('redirect_permanent')
self.assertEqual(response.status_code, 301)
self.assertEqual(response['Location'], "/permanent/")
def test_yaml_decorator(self):
expected = {'ab':"C",1:"2",None:False}
class TestYamlDecoratorController(ActionController):
yaml_default_flow_style=True
@yaml_action()
def test_action(self, request):
return expected
self._request_and_test(TestYamlDecoratorController, "test_action", expected_response=yaml.dump(expected,default_flow_style=True))
def test_yaml_decorator_with_flow_style(self):
expected = {'ab':"C",1:"2",None:False}
class TestYamlWithFlowController(ActionController):
@yaml_action(default_flow_style=True)
def test_action(self, request):
return expected
self._request_and_test(TestYamlWithFlowController, "test_action", expected_response=yaml.dump(expected,default_flow_style=True))
def test_yaml_decorator_with_flow_style_false(self):
input = {'ab':"C",1:"2",None:False}
class TestYamlDecoWithFalseFlowController(ActionController):
@yaml_action(default_flow_style=False)
def test_action(self, request):
return input
self._request_and_test(TestYamlDecoWithFalseFlowController, "test_action", expected_response=yaml.dump(input,default_flow_style=False))
def test_after_filter(self):
input = {'ab':"C",1:"2",None:False}
after = {'c':'z'}
class TestAfterFilterController(ActionController):
def _after_filter(self, request):
return after
@json_action()
def test_action(self, request):
return input
copied = input.copy()
copied.update(after)
self._request_and_test(
TestAfterFilterController,
"test_action",
expected_response=json.dumps(copied)
)
def test_after_filter_can_access_context(self):
"""
This verifies that `_after_filter` is run, that it has access to the
context that was returned by an `action`, and that `_after_filter` can modify
the context before returning it to the client.
after_filter takes value `foo` from our dictionary,
and assigns it to key `bar`. It should also replace the original
`foo` value with `bazinga`
:return:
"""
input = {'foo':"123"}
class TestAfterFilterContextController(ActionController):
def _after_filter(self, request):
after = {
'bar': self._template_context['foo'],
'foo': 'bazinga'
}
return after
@json_action()
def test_action(self, request):
return input
self._request_and_test(
TestAfterFilterContextController,
"test_action",
expected_response=json.dumps({"foo":'bazinga',"bar":"123"}))
def test_tuple_response_status_code(self):
expected = "HAIHAIHAI"
class TupleController(ActionController):
@json_action()
def three_three(self, request):
return expected, 333
rf = RequestFactory()
request = rf.get('/three_three/')
controller = TupleController(site=None, request=request, helper_class=None, url_params=None)
response = controller._call_action('three_three')
self.assertEqual(response.status_code, 333)
self.assertEqual(response.content.decode('utf8'), json.dumps(expected))
def test_as_json_tuple_response_status_code(self):
expected = "HAIHAIHAI"
class TupleController(ActionController):
def three_three(self, request):
return self._as_json(expected), 333
self._request_and_test(TupleController, "three_three", json.dumps(expected), 333)
def test_as_json_param_response_status_code(self):
expected = "HAIHAIHAI"
class TupleController(ActionController):
def three_three(self, request):
return self._as_json(expected, status_code=333)
self._request_and_test(TupleController, "three_three", json.dumps(expected), 333)
def test_param_tuple_status_code(self):
expected = "HAIHAIHAI"
class TupleController(ActionController):
def three_three(self, request):
return self._print(expected),334
self._request_and_test(TupleController, "three_three", expected, 334)
def test_as_json_param_and_tuple_response_status_code(self):
expected = "HAIHAIHAI"
class TupleController(ActionController):
def three_three(self, request):
return self._as_json(expected, status_code=333), 444
self._request_and_test(TupleController, "three_three", json.dumps(expected), 333)
| en | 0.912795 | #x27;/test/json/'>") This verifies that `_after_filter` is run, that it has access to the context that was returned by an `action`, and that `_after_filter` can modify the context before returning it to the client. after_filter takes value `foo` from our dictionary, and assigns it to key `bar`. It should also replace the original `foo` value with `bazinga` :return: | 2.406145 | 2 |
Notebook_gs_svc.py | alejandrox1/tweet_authorship | 0 | 6616569 | from Notebook_helperfunctions import *
from sklearn.svm import SVC
### INPUT
# Command-line options (ngram size, job count) come from the shared helper.
args = cml()
f_authorship = 'users/authorship.csv'

### PREPROCESSING
df = pd.read_csv(f_authorship)
# Bug fix: drop_duplicates() returns a new DataFrame and the result was
# previously discarded, so duplicate tweets were never removed.
df = df.drop_duplicates()
# Fixed seed -> reproducible shuffle of the rows.
np.random.seed(0)
df = df.reindex(np.random.permutation(df.index))
#df['text'] = df['text'].apply(preprocessor)
X = df.loc[:, 'text'].values
y = df.loc[:, 'user_id'].values
# Encode author ids as consecutive integer class labels.
le = LabelEncoder()
y = le.fit_transform(y)
# Train test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20,
                                                    random_state=1)
### Grid Search CV
# TF-IDF feature extraction feeding an SVM classifier.
tfidf = TfidfVectorizer(strip_accents=None,
                        lowercase=False,
                        preprocessor=None)
svc_tfidf = Pipeline([('vect', tfidf),
                      ('clf', SVC(random_state=1))])
ngram_range = [(1, args.ngram)]
param_range = [0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
# Four sub-grids: {linear, rbf} kernels, each with the default tf-idf
# weighting and with idf/normalization disabled (raw term frequencies).
param_grid = [{'vect__ngram_range': ngram_range,
               'vect__stop_words': [stop, None],
               'vect__tokenizer': [tokenizer, tokenizer_porter, tokenizer_twitter],
               'clf__C': param_range,
               'clf__kernel': ['linear']},
              {'vect__ngram_range': ngram_range,
               'vect__stop_words': [stop, None],
               'vect__tokenizer': [tokenizer, tokenizer_porter, tokenizer_twitter],
               'vect__use_idf':[False],
               'vect__norm':[None],
               'clf__C': param_range,
               'clf__kernel': ['linear']},
              {'vect__ngram_range': ngram_range,
               'vect__stop_words': [stop, None],
               'vect__tokenizer': [tokenizer, tokenizer_porter, tokenizer_twitter],
               'clf__C': param_range,
               'clf__gamma': param_range,
               'clf__kernel': ['rbf']},
              {'vect__ngram_range': ngram_range,
               'vect__stop_words': [stop, None],
               'vect__tokenizer': [tokenizer, tokenizer_porter, tokenizer_twitter],
               'vect__use_idf':[False],
               'vect__norm':[None],
               'clf__C': param_range,
               'clf__gamma': param_range,
               'clf__kernel': ['rbf']}]
# 5-fold cross-validated accuracy over the full grid, parallelized.
gs_svc_tfidf = GridSearchCV(estimator=svc_tfidf,
                            param_grid=param_grid,
                            scoring='accuracy',
                            cv=5,
                            verbose=1,
                            n_jobs=args.jobs)
# Fit the full grid search on the training split and time it.
svc_time0 = time.time()
gs_svc_tfidf.fit(X_train, y_train)
svc_time1 = time.time()
print('EXECUTION TIME for svc gs : {} secs\n\n'.format(svc_time1 - svc_time0))
print('Best parameter set: {} \n'.format(gs_svc_tfidf.best_params_))
print('CV Accuracy: {:.3f}'.format(gs_svc_tfidf.best_score_))
# Evaluate the selected model on the held-out test split.
clf_svc = gs_svc_tfidf.best_estimator_
print('Test Accuracy: {:.3f}'.format(clf_svc.score(X_test, y_test)))
# Store model
dest = os.path.join('pkl_objects')
# Create the output directory when missing; previously the model was silently
# skipped (grid-search results lost) whenever 'pkl_objects' did not exist.
os.makedirs(dest, exist_ok=True)
# Context manager ensures the file handle is closed after pickling.
with open(os.path.join(dest, str(args.ngram) + 'gs_svc.pkl'), 'wb') as model_file:
    pickle.dump(gs_svc_tfidf, model_file, protocol=4)
| from Notebook_helperfunctions import *
from sklearn.svm import SVC
### INPUT
args = cml()
f_authorship = 'users/authorship.csv'
### PREPROCESSING
df = pd.read_csv(f_authorship)
df.drop_duplicates()
np.random.seed(0)
df = df.reindex(np.random.permutation(df.index))
#df['text'] = df['text'].apply(preprocessor)
X = df.loc[:, 'text'].values
y = df.loc[:, 'user_id'].values
le = LabelEncoder()
y = le.fit_transform(y)
# Train test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20,
random_state=1)
### Grid Search CV
tfidf = TfidfVectorizer(strip_accents=None,
lowercase=False,
preprocessor=None)
svc_tfidf = Pipeline([('vect', tfidf),
('clf', SVC(random_state=1))])
ngram_range = [(1, args.ngram)]
param_range = [0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
param_grid = [{'vect__ngram_range': ngram_range,
'vect__stop_words': [stop, None],
'vect__tokenizer': [tokenizer, tokenizer_porter, tokenizer_twitter],
'clf__C': param_range,
'clf__kernel': ['linear']},
{'vect__ngram_range': ngram_range,
'vect__stop_words': [stop, None],
'vect__tokenizer': [tokenizer, tokenizer_porter, tokenizer_twitter],
'vect__use_idf':[False],
'vect__norm':[None],
'clf__C': param_range,
'clf__kernel': ['linear']},
{'vect__ngram_range': ngram_range,
'vect__stop_words': [stop, None],
'vect__tokenizer': [tokenizer, tokenizer_porter, tokenizer_twitter],
'clf__C': param_range,
'clf__gamma': param_range,
'clf__kernel': ['rbf']},
{'vect__ngram_range': ngram_range,
'vect__stop_words': [stop, None],
'vect__tokenizer': [tokenizer, tokenizer_porter, tokenizer_twitter],
'vect__use_idf':[False],
'vect__norm':[None],
'clf__C': param_range,
'clf__gamma': param_range,
'clf__kernel': ['rbf']}]
gs_svc_tfidf = GridSearchCV(estimator=svc_tfidf,
param_grid=param_grid,
scoring='accuracy',
cv=5,
verbose=1,
n_jobs=args.jobs)
svc_time0 = time.time()
gs_svc_tfidf.fit(X_train, y_train)
svc_time1 = time.time()
print('EXECUTION TIME for svc gs : {} secs\n\n'.format(svc_time1 - svc_time0))
print('Best parameter set: {} \n'.format(gs_svc_tfidf.best_params_))
print('CV Accuracy: {:.3f}'.format(gs_svc_tfidf.best_score_))
clf_svc = gs_svc_tfidf.best_estimator_
print('Test Accuracy: {:.3f}'.format(clf_svc.score(X_test, y_test)))
# Store model
dest = os.path.join('pkl_objects')
if os.path.exists(dest):
pickle.dump(gs_svc_tfidf, open(os.path.join(dest, str(args.ngram)+'gs_svc.pkl'), 'wb'),
protocol=4)
| en | 0.218899 | ### INPUT ### PREPROCESSING #df['text'] = df['text'].apply(preprocessor) # Train test split ### Grid Search CV # Store model | 2.41246 | 2 |
src/space/space.py | hgf777-br/space-flight-news-20210823 | 0 | 6616570 | import requests
BASE_URL = "https://api.spaceflightnewsapi.net/v3"
class Article:
    """One Space Flight News article, mirroring the v3 API JSON payload."""
    def __init__(
        self,
        id: int,
        featured: bool,
        title: str,
        url: str,
        imageUrl: str,
        newsSite: str,
        summary: str,
        publishedAt: str,
        updatedAt: str,
        launches: list[dict],
        events: list[dict]
    ) -> None:
        # Parameter names match the API's JSON keys so Article(**payload) works.
        self.id = id
        self.featured = featured
        self.title = title
        self.url = url
        self.imageUrl = imageUrl
        self.newsSite = newsSite
        self.summary = summary
        self.publishedAt = publishedAt
        self.updatedAt = updatedAt
        self.launches = launches
        self.events = events
    def listar_dados(self):
        """Flatten the article into a single list of cell values."""
        row = [
            self.id,
            self.title,
            self.url,
            self.imageUrl,
            self.newsSite,
            self.summary,
            self.publishedAt,
            self.updatedAt,
            self.featured,
        ]
        # Only the first related launch/event is exported; two empty cells
        # keep the column layout stable when the list is empty.
        for related in (self.launches, self.events):
            if related:
                row.extend(related[0].values())
            else:
                row.extend(["", ""])
        return row
class Space():
    """Thin HTTP client for the Space Flight News v3 REST API."""
    def articles_count(self) -> int:
        """Return the total article count, or -1 when the request fails."""
        response = requests.get(BASE_URL + "/articles/count")
        if response.status_code != 200:
            return -1
        return int(response.text)
    def articles(self, count: int) -> list:
        """Return up to *count* Article objects sorted by id ([] on failure)."""
        response = requests.get(BASE_URL + f"/articles?_limit={count}&_sort=id")
        if response.status_code != 200:
            return []
        return [Article(**payload) for payload in response.json()]
    def new_articles(self, id: int) -> list:
        """Return the articles with an id greater than *id* ([] on failure)."""
        response = requests.get(BASE_URL + f"/articles?_sort=id&id_gt={id}")
        if response.status_code != 200:
            return []
        return [Article(**payload) for payload in response.json()]
| import requests
BASE_URL = "https://api.spaceflightnewsapi.net/v3"
class Article:
def __init__(
self,
id: int,
featured: bool,
title: str,
url: str,
imageUrl: str,
newsSite: str,
summary: str,
publishedAt: str,
updatedAt: str,
launches: list[dict],
events: list[dict]
) -> None:
self.id = id
self.featured = featured
self.title = title
self.url = url
self.imageUrl = imageUrl
self.newsSite = newsSite
self.summary = summary
self.publishedAt = publishedAt
self.updatedAt = updatedAt
self.launches = launches
self.events = events
def listar_dados(self):
res = [
self.id,
self.title,
self.url,
self.imageUrl,
self.newsSite,
self.summary,
self.publishedAt,
self.updatedAt,
self.featured
]
if len(self.launches) == 0:
res.extend(["", ""])
else:
res.extend(self.launches[0].values())
if len(self.events) == 0:
res.extend(["", ""])
else:
res.extend(self.events[0].values())
return res
class Space():
def articles_count(self) -> int:
r = requests.get(BASE_URL + "/articles/count")
if r.status_code == 200:
return int(r.text)
else:
return -1
def articles(self, count: int) -> list:
r = requests.get(BASE_URL + f"/articles?_limit={count}&_sort=id")
if r.status_code == 200:
return [Article(**a) for a in r.json()]
else:
return []
def new_articles(self, id: int) -> list:
r = requests.get(BASE_URL + f"/articles?_sort=id&id_gt={id}")
if r.status_code == 200:
return [Article(**a) for a in r.json()]
else:
return []
| none | 1 | 2.776592 | 3 | |
qiita_pet/handlers/api_proxy/util.py | smruthi98/qiita | 96 | 6616571 | <filename>qiita_pet/handlers/api_proxy/util.py
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import exists, join
from qiita_db.exceptions import QiitaDBUnknownIDError
from qiita_db.study import Study
from qiita_db.user import User
from qiita_db.util import get_mountpoint
def check_access(study_id, user_id):
    """Verify that the given user may access the given study.

    Parameters
    ----------
    study_id : int
        ID of the study to check access to
    user_id : str
        ID of the user to check access for

    Returns
    -------
    dict
        An empty dict when access is allowed, otherwise
        {'status': 'error', 'message': reason for error}
    """
    try:
        study = Study(int(study_id))
    except QiitaDBUnknownIDError:
        return {'status': 'error',
                'message': 'Study does not exist'}
    if study.has_access(User(user_id)):
        return {}
    return {'status': 'error',
            'message': 'User does not have access to study'}
def check_fp(study_id, filename):
    """Resolve an uploaded file inside the study's uploads folder.

    Parameters
    ----------
    study_id : int
        Study file uploaded to
    filename : str
        name of the uploaded file

    Returns
    -------
    dict
        {'status': status, 'message': msg, 'file': str} where 'file' is
        the full filepath on success and the bare filename on error.
    """
    # First "uploads" mountpoint; the tuple's first element is unused here.
    _, uploads_dir = get_mountpoint("uploads")[0]
    # Uploads live in a per-study sub-folder named after the study id.
    full_path = join(uploads_dir, str(study_id), filename)
    if exists(full_path):
        return {'status': 'success',
                'message': '',
                'file': full_path}
    # The file does not exist, fail nicely
    return {'status': 'error',
            'message': 'file does not exist',
            'file': filename}
| <filename>qiita_pet/handlers/api_proxy/util.py
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import exists, join
from qiita_db.exceptions import QiitaDBUnknownIDError
from qiita_db.study import Study
from qiita_db.user import User
from qiita_db.util import get_mountpoint
def check_access(study_id, user_id):
"""Checks if user given has access to the study given
Parameters
----------
study_id : int
ID of the study to check access to
user_id : str
ID of the user to check access for
Returns
-------
dict
Empty dict if access allowed, else a dict in the form
{'status': 'error',
'message': reason for error}
"""
try:
study = Study(int(study_id))
except QiitaDBUnknownIDError:
return {'status': 'error',
'message': 'Study does not exist'}
if not study.has_access(User(user_id)):
return {'status': 'error',
'message': 'User does not have access to study'}
return {}
def check_fp(study_id, filename):
"""Check whether an uploaded file exists
Parameters
----------
study_id : int
Study file uploaded to
filename : str
name of the uploaded file
Returns
-------
dict
{'status': status,
'message': msg,
'file': str}
file contains full filepath if status is success, otherwise it contains
the filename
"""
# Get the uploads folder
_, base_fp = get_mountpoint("uploads")[0]
# Get the path of the sample template in the uploads folder
fp_rsp = join(base_fp, str(study_id), filename)
if not exists(fp_rsp):
# The file does not exist, fail nicely
return {'status': 'error',
'message': 'file does not exist',
'file': filename}
return {'status': 'success',
'message': '',
'file': fp_rsp}
| en | 0.692496 | # ----------------------------------------------------------------------------- # Copyright (c) 2014--, The Qiita Development Team. # # Distributed under the terms of the BSD 3-clause License. # # The full license is in the file LICENSE, distributed with this software. # ----------------------------------------------------------------------------- Checks if user given has access to the study given Parameters ---------- study_id : int ID of the study to check access to user_id : str ID of the user to check access for Returns ------- dict Empty dict if access allowed, else a dict in the form {'status': 'error', 'message': reason for error} Check whether an uploaded file exists Parameters ---------- study_id : int Study file uploaded to filename : str name of the uploaded file Returns ------- dict {'status': status, 'message': msg, 'file': str} file contains full filepath if status is success, otherwise it contains the filename # Get the uploads folder # Get the path of the sample template in the uploads folder # The file does not exist, fail nicely | 2.304156 | 2 |
preprocess/extract.py | zhengyima/CoLAKE | 73 | 6616572 | from bs4 import BeautifulSoup
import sys
from urllib import parse
import os
from multiprocessing import Pool
# Recursively collect every file under the extractor output tree.
input_folder = "../pretrain_data/output"
file_list = [
    os.path.join(path, filename)
    for path, _, filenames in os.walk(input_folder)
    for filename in filenames
]
print(len(file_list))
def run_proc(idx, n, file_list):
    """Worker *idx* of *n*: process its share of the extractor output files.

    Each worker takes the files whose index satisfies ``i % n == idx``.
    For every ``<doc>`` element found in an input file it writes one line
    to the mirrored path under ``pretrain_data/ann`` in the format:
    ``<text>[_end_]<anchor1>[_map_]<target1>[_end_]<anchor2>[_map_]<target2>...``
    """
    for i in range(len(file_list)):
        if i % n == idx:
            input_name = file_list[i]
            print('{}: {}'.format(i, input_name))
            # Mirror the input path into the "ann" output tree, creating
            # the parent directory when needed.
            target = input_name.replace('pretrain_data/output', "pretrain_data/ann")
            folder = '/'.join(target.split('/')[:-1])
            if not os.path.exists(folder):
                os.makedirs(folder)
            soup = BeautifulSoup(open(input_name, encoding='utf-8'), features="html5lib")
            docs = soup.find_all('doc')
            fout = open(target, 'w', encoding='utf-8')
            for doc in docs:
                # " sepsepsep " marks tag boundaries inside the extracted text.
                content = doc.get_text(" sepsepsep ")
                while content[0] == "\n":
                    content = content[1:]
                # Drop the first line (presumably the article title — TODO
                # confirm against the extractor output) and join the rest.
                content = [x.strip() for x in content.split("\n")]
                content = "".join(content[1:])
                lookup = []
                # Collect (anchor text, percent-decoded link target) pairs;
                # anchors without an href are skipped.
                for x in doc.find_all("a"):
                    if x.get('href') is not None:
                        lookup.append((x.get_text().strip(), parse.unquote(x.get('href'))))
                # lookup = [(x.get_text().strip(), parse.unquote(x.get('href'))) for x in doc.find_all("a")]
                lookup = "[_end_]".join(["[_map_]".join(x) for x in lookup])
                fout.write(content+"[_end_]"+lookup+"\n")
            fout.close()
# Fan the work out over n worker processes (n comes from the command line);
# each worker picks the files whose index is congruent to its own id
# modulo n (see run_proc).
n = int(sys.argv[1])
p = Pool(n)
for i in range(n):
    p.apply_async(run_proc, args=(i, n, file_list))
p.close()
p.join()
import sys
from urllib import parse
import os
from multiprocessing import Pool
input_folder = "../pretrain_data/output"
file_list = []
for path, _, filenames in os.walk(input_folder):
for filename in filenames:
file_list.append(os.path.join(path, filename))
print(len(file_list))
def run_proc(idx, n, file_list):
for i in range(len(file_list)):
if i % n == idx:
input_name = file_list[i]
print('{}: {}'.format(i, input_name))
target = input_name.replace('pretrain_data/output', "pretrain_data/ann")
folder = '/'.join(target.split('/')[:-1])
if not os.path.exists(folder):
os.makedirs(folder)
soup = BeautifulSoup(open(input_name, encoding='utf-8'), features="html5lib")
docs = soup.find_all('doc')
fout = open(target, 'w', encoding='utf-8')
for doc in docs:
content = doc.get_text(" sepsepsep ")
while content[0] == "\n":
content = content[1:]
content = [x.strip() for x in content.split("\n")]
content = "".join(content[1:])
lookup = []
for x in doc.find_all("a"):
if x.get('href') is not None:
lookup.append((x.get_text().strip(), parse.unquote(x.get('href'))))
# lookup = [(x.get_text().strip(), parse.unquote(x.get('href'))) for x in doc.find_all("a")]
lookup = "[_end_]".join(["[_map_]".join(x) for x in lookup])
fout.write(content+"[_end_]"+lookup+"\n")
fout.close()
n = int(sys.argv[1])
p = Pool(n)
for i in range(n):
p.apply_async(run_proc, args=(i, n, file_list))
p.close()
p.join() | en | 0.234489 | # lookup = [(x.get_text().strip(), parse.unquote(x.get('href'))) for x in doc.find_all("a")] | 2.419472 | 2 |
Framework/logger.py | Phinner/DiscordBot-Framework | 1 | 6616573 | <reponame>Phinner/DiscordBot-Framework
from os.path import exists
from time import gmtime, strftime
class CustomLogger(object):
    """Writes the bot's activity to a log file and, optionally, the console.

    Each level listed in ``self.levels`` can be enabled/disabled
    independently.
    """
    def __init__(self, client):
        """
        Logs information about the bot activity.
        levels can be enabled/disabled.

        :param client: the bot client; only ``client.LOG_PATH`` is read here.
        """
        # Flip a value to False to silence that level entirely.
        self.levels = {
            "TRACE": True,
            "DEBUG": True,
            "INFO": True,
            "WARNING": True,
            "ERROR": True,
            "EXCEPTION": True,
            "CRITICAL": True
        }
        self.client = client
        self._dailylogger = True
        self._console_printer = True
        self._message = "{time}[{level}]> {msg}"
        self._time_formatter = "[%Y-%m-%d][%H:%M:%S]"
    # --------------------------------------------------------------------------- #
    @property
    def dailylogger(self):
        """
        dailylogger is a boolean.
        If it's True, the bot will split its logs by changing the log file every day.
        The logs are named in the "mm-dd-yyyy.log" format.
        """
        return self._dailylogger
    @dailylogger.setter
    def dailylogger(self, value):
        if type(value) is bool:
            self._dailylogger = value
        else:
            # BUG FIX: the TypeError used to be created but never raised,
            # so invalid assignments were silently ignored.
            raise TypeError(f"{value} is not a boolean.")
    # --------------------------------------------------------------------------- #
    @property
    def console_printer(self):
        """
        console_printer is a boolean.
        If it's True, the logger will print the log in the console.
        """
        # BUG FIX: this getter used to return self._dailylogger.
        return self._console_printer
    @console_printer.setter
    def console_printer(self, value):
        if type(value) is bool:
            self._console_printer = value
        else:
            raise TypeError(f"{value} is not a boolean.")
    # --------------------------------------------------------------------------- #
    @property
    def message(self):
        """
        Message format used for every log record.
        The default formatter accepts [time, level, msg].
        """
        return self._message
    @message.setter
    def message(self, value):
        if type(value) is str:
            self._message = value
        else:
            raise TypeError(f"{value} is not a string.")
    # --------------------------------------------------------------------------- #
    @property
    def time_formatter(self):
        """
        time_formatter is the way time is displayed when the bot logs,
        check https://docs.python.org/3/library/time.html#time.strftime to see how to modify it.
        """
        return self._time_formatter
    @time_formatter.setter
    def time_formatter(self, value):
        if type(value) is str:
            self._time_formatter = value
        else:
            raise TypeError(f"{value} is not a string.")
    # --------------------------------------------------------------------------- #
    def getLogFile(self):
        """
        Return the path of the log file to append to, creating an empty
        file first when it does not exist yet.
        """
        # Determines the name of the log
        file_name = self.client.LOG_PATH
        if self._dailylogger:
            file_name += f"\\{strftime('%m-%d-%Y', gmtime())}.log"
        else:
            file_name += "\\Log.log"
        # NOTE(review): the "\\" separator assumes Windows paths — consider
        # os.path.join if the bot ever runs elsewhere.
        # Checks if the log file already exist. If false, it creates a new one.
        if not exists(file_name):
            open(file_name, "w").close()
        return file_name
    def log(self, level, message):
        """Append *message* at *level*; no-op when that level is disabled."""
        if not self.levels[level]:
            return
        with open(self.getLogFile(), "a") as file:
            log_message = self.message.format(level=level, msg=message, time=strftime(self._time_formatter, gmtime()))
            file.write(log_message + "\n")
        if self.console_printer:
            print(log_message)
    # --------------------------------------------------------------------------- #
    def TRACE(self, message):
        self.log("TRACE", message)
    def DEBUG(self, message):
        self.log("DEBUG", message)
    def INFO(self, message):
        self.log("INFO", message)
    def WARNING(self, message):
        self.log("WARNING", message)
    def ERROR(self, message):
        self.log("ERROR", message)
    def EXCEPTION(self, message):
        self.log("EXCEPTION", message)
    def CRITICAL(self, message):
        self.log("CRITICAL", message)
| from os.path import exists
from time import gmtime, strftime
class CustomLogger(object):
def __init__(self, client):
"""
Logs information about the bot activity.
levels can be enabled/disabled.
"""
self.levels = {
"TRACE": True,
"DEBUG": True,
"INFO": True,
"WARNING": True,
"ERROR": True,
"EXCEPTION": True,
"CRITICAL": True
}
self.client = client
self._dailylogger = True
self._console_printer = True
self._message = "{time}[{level}]> {msg}"
self._time_formatter = "[%Y-%m-%d][%H:%M:%S]"
# --------------------------------------------------------------------------- #
@property
def dailylogger(self):
"""
dailylogger is a boolean.
If it's True, the bot will split it's logs by changing the log file every day.
The logs are named in the "mm-dd-yyyy.log" format.
"""
return self._dailylogger
@dailylogger.setter
def dailylogger(self, value):
if type(value) is bool:
self._dailylogger = value
else: TypeError(f"{value} is not a boolean.")
# --------------------------------------------------------------------------- #
@property
def console_printer(self):
"""
console_printer is a boolean.
If it's True, the logger will print the log in the console.
"""
return self._dailylogger
@console_printer.setter
def console_printer(self, value):
if type(value) is bool:
self._console_printer = value
else: TypeError(f"{value} is not a boolean.")
# --------------------------------------------------------------------------- #
@property
def message(self):
"""
This field is for setting the message format of the logger.
The default formatter accept [time, level, msg].
"""
return self._message
@message.setter
def message(self, value):
if type(value) is str:
self._message = value
else: TypeError(f"{value} is not a string.")
# --------------------------------------------------------------------------- #
@property
def time_formatter(self):
"""
time_format is the way time is displayed when the bot logs,
check https://docs.python.org/3/library/time.html#time.strftime to see how to modify it.
"""
return self._time_formatter
@time_formatter.setter
def time_formatter(self, value):
if type(value) is str:
self._time_formatter = value
else: TypeError(f"{value} is not a string.")
# --------------------------------------------------------------------------- #
def getLogFile(self):
"""
This function is called before the bot logs something because it returns the name of the log file.
"""
# Determines the name of the log
file_name = self.client.LOG_PATH
if self._dailylogger:
file_name += f"\\{strftime('%m-%d-%Y', gmtime())}.log"
else:
file_name += "\\Log.log"
# Checks if the log file already exist. If false, it creates a new one.
if not exists(file_name):
open(file_name, "w").close()
return file_name
def log(self, level, message):
if not self.levels[level]:
return
with open(self.getLogFile(), "a") as file:
log_message = self.message.format(level=level, msg=message, time=strftime(self._time_formatter, gmtime()))
file.write(log_message + "\n")
if self.console_printer:
print(log_message)
# --------------------------------------------------------------------------- #
def TRACE(self, message):
self.log("TRACE", message)
def DEBUG(self, message):
self.log("DEBUG", message)
def INFO(self, message):
self.log("INFO", message)
def WARNING(self, message):
self.log("WARNING", message)
def ERROR(self, message):
self.log("ERROR", message)
def EXCEPTION(self, message):
self.log("EXCEPTION", message)
def CRITICAL(self, message):
self.log("CRITICAL", message) | en | 0.548004 | Logs information about the bot activity. levels can be enabled/disabled. # --------------------------------------------------------------------------- # dailylogger is a boolean. If it's True, the bot will split it's logs by changing the log file every day. The logs are named in the "mm-dd-yyyy.log" format. # --------------------------------------------------------------------------- # console_printer is a boolean. If it's True, the logger will print the log in the console. # --------------------------------------------------------------------------- # This field is for setting the message format of the logger. The default formatter accept [time, level, msg]. # --------------------------------------------------------------------------- # time_format is the way time is displayed when the bot logs, check https://docs.python.org/3/library/time.html#time.strftime to see how to modify it. # --------------------------------------------------------------------------- # This function is called before the bot logs something because it returns the name of the log file. # Determines the name of the log # Checks if the log file already exist. If false, it creates a new one. # --------------------------------------------------------------------------- # | 3.042701 | 3 |
backend/server.py | naterush/feast | 0 | 6616574 | <gh_stars>0
# Python 3 server example
from http.server import SimpleHTTPRequestHandler, HTTPServer
import json
from Cart import InstaCart
hostName = "localhost"
serverPort = 8080
cart = InstaCart('<EMAIL>', '../password-instacart.txt')
cart.login()
DIRECTORY = '../ui/out'
class MyServer(SimpleHTTPRequestHandler):
    """Serves the static UI from DIRECTORY plus a small JSON API that
    proxies the module-level ``cart`` (an InstaCart session)."""
    def __init__(self, *args, **kwargs):
        # Root the static file server at the built UI output folder.
        super().__init__(*args, directory=DIRECTORY, **kwargs)
    def do_OPTIONS(self):
        # Answer CORS preflight requests with a bare 200.
        self.send_response(200, "ok")
        self.end_headers()
    def do_GET(self):
        # API endpoint: current cart state as JSON.
        if self.path.startswith('/getcurrentrecipe'):
            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.end_headers()
            self.wfile.write(cart.to_JSON().encode('utf8'))
            return
        else:
            # Serve the files, doh
            super().do_GET()
    def do_POST(self):
        # NOTE(review): a POST whose path matches none of the branches
        # below never gets a response — the client will hang; consider a
        # trailing 404. Also note these are independent `if`s, not elif.
        if self.path.startswith('/setcurrentrecipe'):
            # Body: {"url": <recipe url>}
            content = json.loads(self.rfile.read(int(self.headers['Content-Length'])).decode('utf8'))
            url = content['url']
            print("Adding url")
            # Add the recipe
            cart.add_recipe(url)
            # Send the response
            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.end_headers()
        if self.path.startswith('/toggle_ingredient'):
            # Body: {"index": <ingredient index>}
            content = json.loads(self.rfile.read(int(self.headers['Content-Length'])).decode('utf8'))
            index = content['index']
            cart.toggle_ingredient(index)
            # Send the response
            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.end_headers()
        if self.path.startswith('/clear'):
            # Empty the cart; no request body required.
            cart.clear()
            # Send the response
            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.end_headers()
if __name__ == "__main__":
    # Blocking HTTP server; Ctrl-C shuts it down cleanly.
    webServer = HTTPServer((hostName, serverPort), MyServer)
    print("Server started http://%s:%s" % (hostName, serverPort))
    try:
        webServer.serve_forever()
    except KeyboardInterrupt:
        pass
    webServer.server_close()
    print("Server stopped.")
# TODO: 1. serve the folder that's at out
# TODO:
from http.server import SimpleHTTPRequestHandler, HTTPServer
import json
from Cart import InstaCart
hostName = "localhost"
serverPort = 8080
cart = InstaCart('<EMAIL>', '../password-instacart.txt')
cart.login()
DIRECTORY = '../ui/out'
class MyServer(SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, directory=DIRECTORY, **kwargs)
def do_OPTIONS(self):
self.send_response(200, "ok")
self.end_headers()
def do_GET(self):
if self.path.startswith('/getcurrentrecipe'):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(cart.to_JSON().encode('utf8'))
return
else:
# Serve the files, doh
super().do_GET()
def do_POST(self):
if self.path.startswith('/setcurrentrecipe'):
content = json.loads(self.rfile.read(int(self.headers['Content-Length'])).decode('utf8'))
url = content['url']
print("Adding url")
# Add the recipe
cart.add_recipe(url)
# Send the response
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
if self.path.startswith('/toggle_ingredient'):
content = json.loads(self.rfile.read(int(self.headers['Content-Length'])).decode('utf8'))
index = content['index']
cart.toggle_ingredient(index)
# Send the response
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
if self.path.startswith('/clear'):
cart.clear()
# Send the response
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
if __name__ == "__main__":
webServer = HTTPServer((hostName, serverPort), MyServer)
print("Server started http://%s:%s" % (hostName, serverPort))
try:
webServer.serve_forever()
except KeyboardInterrupt:
pass
webServer.server_close()
print("Server stopped.")
# TODO: 1. serve the folder that's at out
# TODO: | en | 0.781664 | # Python 3 server example # Serve the files, doh # Add the recipe # Send the response # Send the response # Send the response # TODO: 1. serve the folder that's at out # TODO: | 3.261389 | 3 |
chapter_12/QASystem/generate_question.py | tanzhou2002/SourceCodeofMongoRedis | 183 | 6616575 | <reponame>tanzhou2002/SourceCodeofMongoRedis
import pymongo
from bson import ObjectId
# Collection handle: database "qa_system", collection "question" on the
# default local MongoDB instance.
handler = pymongo.MongoClient().qa_system.question
# Seed data: seven demo questions with hard-coded ObjectIds, so rerunning
# this script raises a duplicate-key error instead of inserting twice.
question = [{'_id': ObjectId('5ead107ed1174b0308255c35'),
             'ask_time': '2018-07-23 12:18:11',
             'author': '王小一',
             'detail': '请问1+1等于几?',
             'title': '1+1=?',
             'vote_down': 100,
             'vote_up': 0},
            {'_id': ObjectId('5ead107ed1174b0308255c36'),
             'ask_time': '2018-07-23 12:18:11',
             'author': '张小二',
             'detail': '这句话出自哪里?',
             'title': '为什么说42 is the answer of all?',
             'vote_down': 0,
             'vote_up': 100},
            {'_id': ObjectId('5ead107ed1174b0308255c37'),
             'ask_time': '2018-07-23 12:18:11',
             'author': '刘小三',
             'detail': '明天会下雨吗?',
             'title': '明天天气如何?',
             'vote_down': 10,
             'vote_up': 10},
            {'_id': ObjectId('5ead107ed1174b0308255c38'),
             'ask_time': '2018-07-23 12:18:11',
             'author': '旺小四',
             'detail': '还有,这是谁写的诗?',
             'title': '此时相忘不相闻下一句是什么?',
             'vote_down': 3,
             'vote_up': 100},
            {'_id': ObjectId('5ead107ed1174b0308255c39'),
             'ask_time': '2018-07-23 12:18:11',
             'author': '赵小五',
             'detail': '孵蛋除了温度还需要什么?',
             'title': '把微波炉温度调低一些,可以孵鸡蛋吗?',
             'vote_down': 3,
             'vote_up': 23},
            {'_id': ObjectId('5ead107ed1174b0308255c3a'),
             'ask_time': '2018-07-23 12:18:11',
             'author': '朱小六',
             'detail': '请回答具体原因。',
             'title': '四大名著你喜欢哪一本?',
             'vote_down': 2,
             'vote_up': 70},
            {'_id': ObjectId('5ead107ed1174b0308255c3b'),
             'ask_time': '2018-07-23 12:18:11',
             'author': '马小七',
             'detail': '这本书的作者又是是呢?',
             'title': '你知道明朝时期的四大名著,除了《西游记》《水浒传》和《三国演义》还有一本是什么吗?',
             'vote_down': 16,
             'vote_up': 120}]
# Bulk insert; fails fast on duplicate _id values (see note above).
handler.insert_many(question)
from bson import ObjectId
handler = pymongo.MongoClient().qa_system.question
question = [{'_id': ObjectId('5ead107ed1174b0308255c35'),
'ask_time': '2018-07-23 12:18:11',
'author': '王小一',
'detail': '请问1+1等于几?',
'title': '1+1=?',
'vote_down': 100,
'vote_up': 0},
{'_id': ObjectId('5ead107ed1174b0308255c36'),
'ask_time': '2018-07-23 12:18:11',
'author': '张小二',
'detail': '这句话出自哪里?',
'title': '为什么说42 is the answer of all?',
'vote_down': 0,
'vote_up': 100},
{'_id': ObjectId('5ead107ed1174b0308255c37'),
'ask_time': '2018-07-23 12:18:11',
'author': '刘小三',
'detail': '明天会下雨吗?',
'title': '明天天气如何?',
'vote_down': 10,
'vote_up': 10},
{'_id': ObjectId('5ead107ed1174b0308255c38'),
'ask_time': '2018-07-23 12:18:11',
'author': '旺小四',
'detail': '还有,这是谁写的诗?',
'title': '此时相忘不相闻下一句是什么?',
'vote_down': 3,
'vote_up': 100},
{'_id': ObjectId('5ead107ed1174b0308255c39'),
'ask_time': '2018-07-23 12:18:11',
'author': '赵小五',
'detail': '孵蛋除了温度还需要什么?',
'title': '把微波炉温度调低一些,可以孵鸡蛋吗?',
'vote_down': 3,
'vote_up': 23},
{'_id': ObjectId('5ead107ed1174b0308255c3a'),
'ask_time': '2018-07-23 12:18:11',
'author': '朱小六',
'detail': '请回答具体原因。',
'title': '四大名著你喜欢哪一本?',
'vote_down': 2,
'vote_up': 70},
{'_id': ObjectId('5ead107ed1174b0308255c3b'),
'ask_time': '2018-07-23 12:18:11',
'author': '马小七',
'detail': '这本书的作者又是是呢?',
'title': '你知道明朝时期的四大名著,除了《西游记》《水浒传》和《三国演义》还有一本是什么吗?',
'vote_down': 16,
'vote_up': 120}]
handler.insert_many(question) | none | 1 | 2.497104 | 2 | |
strava/contrib/strava_django/__init__.py | DavidLSO/python-strava | 1 | 6616576 | <gh_stars>1-10
default_app_config = "strava.contrib.strava_django.apps.StravaAuthConfig"
| default_app_config = "strava.contrib.strava_django.apps.StravaAuthConfig" | none | 1 | 0.983517 | 1 | |
openapi_server/models/collection.py | eugenegesdisc/gmuedr | 0 | 6616577 | <filename>openapi_server/models/collection.py
# coding: utf-8
from datetime import date, datetime
from typing import List, Dict, Type
from openapi_server.models.base_model_ import Model
from openapi_server.models.collection_data_queries import CollectionDataQueries
from openapi_server.models.extent import Extent
from openapi_server.models.link import Link
from openapi_server import util
class Collection(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """
    def __init__(self, links: List[Link]=None, id: str=None, title: str=None, description: str=None, keywords: List[str]=None, extent: Extent=None, data_queries: CollectionDataQueries=None, crs: List[str]=None, output_formats: List[str]=None, parameter_names: Dict[str, object]=None):
        """Collection - a model defined in OpenAPI

        :param links: The links of this Collection.
        :param id: The id of this Collection.
        :param title: The title of this Collection.
        :param description: The description of this Collection.
        :param keywords: The keywords of this Collection.
        :param extent: The extent of this Collection.
        :param data_queries: The data_queries of this Collection.
        :param crs: The crs of this Collection.
        :param output_formats: The output_formats of this Collection.
        :param parameter_names: The parameter_names of this Collection.
        """
        # Attribute name -> declared type (consumed by the generated
        # (de)serialization machinery in the Model base / util module).
        self.openapi_types = {
            'links': List[Link],
            'id': str,
            'title': str,
            'description': str,
            'keywords': List[str],
            'extent': Extent,
            'data_queries': CollectionDataQueries,
            'crs': List[str],
            'output_formats': List[str],
            'parameter_names': Dict[str, object]
        }
        # Python attribute name -> JSON property name.
        self.attribute_map = {
            'links': 'links',
            'id': 'id',
            'title': 'title',
            'description': 'description',
            'keywords': 'keywords',
            'extent': 'extent',
            'data_queries': 'data_queries',
            'crs': 'crs',
            'output_formats': 'output_formats',
            'parameter_names': 'parameter_names'
        }
        # NOTE(review): assigning the private attributes directly bypasses
        # the setters' required-field (None) validation at construction time.
        self._links = links
        self._id = id
        self._title = title
        self._description = description
        self._keywords = keywords
        self._extent = extent
        self._data_queries = data_queries
        self._crs = crs
        self._output_formats = output_formats
        self._parameter_names = parameter_names
    @classmethod
    def from_dict(cls, dikt: dict) -> 'Collection':
        """Returns the dict as a model

        :param dikt: A dict.
        :return: The collection of this Collection.
        """
        return util.deserialize_model(dikt, cls)
    # links, id and extent are required: their setters reject None.
    @property
    def links(self):
        """Gets the links of this Collection.

        :return: The links of this Collection.
        :rtype: List[Link]
        """
        return self._links
    @links.setter
    def links(self, links):
        """Sets the links of this Collection.

        :param links: The links of this Collection.
        :type links: List[Link]
        """
        if links is None:
            raise ValueError("Invalid value for `links`, must not be `None`")
        self._links = links
    @property
    def id(self):
        """Gets the id of this Collection.

        id of the collection

        :return: The id of this Collection.
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this Collection.

        id of the collection

        :param id: The id of this Collection.
        :type id: str
        """
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")
        self._id = id
    @property
    def title(self):
        """Gets the title of this Collection.

        title of the collection

        :return: The title of this Collection.
        :rtype: str
        """
        return self._title
    @title.setter
    def title(self, title):
        """Sets the title of this Collection.

        title of the collection

        :param title: The title of this Collection.
        :type title: str
        """
        self._title = title
    @property
    def description(self):
        """Gets the description of this Collection.

        description of the collection

        :return: The description of this Collection.
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this Collection.

        description of the collection

        :param description: The description of this Collection.
        :type description: str
        """
        self._description = description
    @property
    def keywords(self):
        """Gets the keywords of this Collection.

        List of keywords which help to describe the collection

        :return: The keywords of this Collection.
        :rtype: List[str]
        """
        return self._keywords
    @keywords.setter
    def keywords(self, keywords):
        """Sets the keywords of this Collection.

        List of keywords which help to describe the collection

        :param keywords: The keywords of this Collection.
        :type keywords: List[str]
        """
        self._keywords = keywords
    @property
    def extent(self):
        """Gets the extent of this Collection.

        :return: The extent of this Collection.
        :rtype: Extent
        """
        return self._extent
    @extent.setter
    def extent(self, extent):
        """Sets the extent of this Collection.

        :param extent: The extent of this Collection.
        :type extent: Extent
        """
        if extent is None:
            raise ValueError("Invalid value for `extent`, must not be `None`")
        self._extent = extent
    @property
    def data_queries(self):
        """Gets the data_queries of this Collection.

        :return: The data_queries of this Collection.
        :rtype: CollectionDataQueries
        """
        return self._data_queries
    @data_queries.setter
    def data_queries(self, data_queries):
        """Sets the data_queries of this Collection.

        :param data_queries: The data_queries of this Collection.
        :type data_queries: CollectionDataQueries
        """
        self._data_queries = data_queries
    @property
    def crs(self):
        """Gets the crs of this Collection.

        list of the coordinate reference systems the collection results can support

        :return: The crs of this Collection.
        :rtype: List[str]
        """
        return self._crs
    @crs.setter
    def crs(self, crs):
        """Sets the crs of this Collection.

        list of the coordinate reference systems the collection results can support

        :param crs: The crs of this Collection.
        :type crs: List[str]
        """
        self._crs = crs
    @property
    def output_formats(self):
        """Gets the output_formats of this Collection.

        list of formats the results can be presented in

        :return: The output_formats of this Collection.
        :rtype: List[str]
        """
        return self._output_formats
    @output_formats.setter
    def output_formats(self, output_formats):
        """Sets the output_formats of this Collection.

        list of formats the results can be presented in

        :param output_formats: The output_formats of this Collection.
        :type output_formats: List[str]
        """
        self._output_formats = output_formats
    @property
    def parameter_names(self):
        """Gets the parameter_names of this Collection.

        list of the data parameters available in the collection

        :return: The parameter_names of this Collection.
        :rtype: Dict[str, object]
        """
        return self._parameter_names
    @parameter_names.setter
    def parameter_names(self, parameter_names):
        """Sets the parameter_names of this Collection.

        list of the data parameters available in the collection

        :param parameter_names: The parameter_names of this Collection.
        :type parameter_names: Dict[str, object]
        """
        self._parameter_names = parameter_names
| <filename>openapi_server/models/collection.py
# coding: utf-8
from datetime import date, datetime
from typing import List, Dict, Type
from openapi_server.models.base_model_ import Model
from openapi_server.models.collection_data_queries import CollectionDataQueries
from openapi_server.models.extent import Extent
from openapi_server.models.link import Link
from openapi_server import util
class Collection(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, links: List[Link]=None, id: str=None, title: str=None, description: str=None, keywords: List[str]=None, extent: Extent=None, data_queries: CollectionDataQueries=None, crs: List[str]=None, output_formats: List[str]=None, parameter_names: Dict[str, object]=None):
"""Collection - a model defined in OpenAPI
:param links: The links of this Collection.
:param id: The id of this Collection.
:param title: The title of this Collection.
:param description: The description of this Collection.
:param keywords: The keywords of this Collection.
:param extent: The extent of this Collection.
:param data_queries: The data_queries of this Collection.
:param crs: The crs of this Collection.
:param output_formats: The output_formats of this Collection.
:param parameter_names: The parameter_names of this Collection.
"""
self.openapi_types = {
'links': List[Link],
'id': str,
'title': str,
'description': str,
'keywords': List[str],
'extent': Extent,
'data_queries': CollectionDataQueries,
'crs': List[str],
'output_formats': List[str],
'parameter_names': Dict[str, object]
}
self.attribute_map = {
'links': 'links',
'id': 'id',
'title': 'title',
'description': 'description',
'keywords': 'keywords',
'extent': 'extent',
'data_queries': 'data_queries',
'crs': 'crs',
'output_formats': 'output_formats',
'parameter_names': 'parameter_names'
}
self._links = links
self._id = id
self._title = title
self._description = description
self._keywords = keywords
self._extent = extent
self._data_queries = data_queries
self._crs = crs
self._output_formats = output_formats
self._parameter_names = parameter_names
@classmethod
def from_dict(cls, dikt: dict) -> 'Collection':
"""Returns the dict as a model
:param dikt: A dict.
:return: The collection of this Collection.
"""
return util.deserialize_model(dikt, cls)
@property
def links(self):
"""Gets the links of this Collection.
:return: The links of this Collection.
:rtype: List[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this Collection.
:param links: The links of this Collection.
:type links: List[Link]
"""
if links is None:
raise ValueError("Invalid value for `links`, must not be `None`")
self._links = links
@property
def id(self):
"""Gets the id of this Collection.
id of the collection
:return: The id of this Collection.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Collection.
id of the collection
:param id: The id of this Collection.
:type id: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`")
self._id = id
@property
def title(self):
"""Gets the title of this Collection.
title of the collection
:return: The title of this Collection.
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this Collection.
title of the collection
:param title: The title of this Collection.
:type title: str
"""
self._title = title
@property
def description(self):
"""Gets the description of this Collection.
description of the collection
:return: The description of this Collection.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Collection.
description of the collection
:param description: The description of this Collection.
:type description: str
"""
self._description = description
@property
def keywords(self):
"""Gets the keywords of this Collection.
List of keywords which help to describe the collection
:return: The keywords of this Collection.
:rtype: List[str]
"""
return self._keywords
@keywords.setter
def keywords(self, keywords):
"""Sets the keywords of this Collection.
List of keywords which help to describe the collection
:param keywords: The keywords of this Collection.
:type keywords: List[str]
"""
self._keywords = keywords
@property
def extent(self):
"""Gets the extent of this Collection.
:return: The extent of this Collection.
:rtype: Extent
"""
return self._extent
@extent.setter
def extent(self, extent):
"""Sets the extent of this Collection.
:param extent: The extent of this Collection.
:type extent: Extent
"""
if extent is None:
raise ValueError("Invalid value for `extent`, must not be `None`")
self._extent = extent
@property
def data_queries(self):
"""Gets the data_queries of this Collection.
:return: The data_queries of this Collection.
:rtype: CollectionDataQueries
"""
return self._data_queries
@data_queries.setter
def data_queries(self, data_queries):
"""Sets the data_queries of this Collection.
:param data_queries: The data_queries of this Collection.
:type data_queries: CollectionDataQueries
"""
self._data_queries = data_queries
@property
def crs(self):
"""Gets the crs of this Collection.
list of the coordinate reference systems the collection results can support
:return: The crs of this Collection.
:rtype: List[str]
"""
return self._crs
@crs.setter
def crs(self, crs):
"""Sets the crs of this Collection.
list of the coordinate reference systems the collection results can support
:param crs: The crs of this Collection.
:type crs: List[str]
"""
self._crs = crs
@property
def output_formats(self):
"""Gets the output_formats of this Collection.
list of formats the results can be presented in
:return: The output_formats of this Collection.
:rtype: List[str]
"""
return self._output_formats
@output_formats.setter
def output_formats(self, output_formats):
"""Sets the output_formats of this Collection.
list of formats the results can be presented in
:param output_formats: The output_formats of this Collection.
:type output_formats: List[str]
"""
self._output_formats = output_formats
@property
def parameter_names(self):
"""Gets the parameter_names of this Collection.
list of the data parameters available in the collection
:return: The parameter_names of this Collection.
:rtype: Dict[str, object]
"""
return self._parameter_names
@parameter_names.setter
def parameter_names(self, parameter_names):
"""Sets the parameter_names of this Collection.
list of the data parameters available in the collection
:param parameter_names: The parameter_names of this Collection.
:type parameter_names: Dict[str, object]
"""
self._parameter_names = parameter_names
| en | 0.722945 | # coding: utf-8 NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). Do not edit the class manually. Collection - a model defined in OpenAPI :param links: The links of this Collection. :param id: The id of this Collection. :param title: The title of this Collection. :param description: The description of this Collection. :param keywords: The keywords of this Collection. :param extent: The extent of this Collection. :param data_queries: The data_queries of this Collection. :param crs: The crs of this Collection. :param output_formats: The output_formats of this Collection. :param parameter_names: The parameter_names of this Collection. Returns the dict as a model :param dikt: A dict. :return: The collection of this Collection. Gets the links of this Collection. :return: The links of this Collection. :rtype: List[Link] Sets the links of this Collection. :param links: The links of this Collection. :type links: List[Link] Gets the id of this Collection. id of the collection :return: The id of this Collection. :rtype: str Sets the id of this Collection. id of the collection :param id: The id of this Collection. :type id: str Gets the title of this Collection. title of the collection :return: The title of this Collection. :rtype: str Sets the title of this Collection. title of the collection :param title: The title of this Collection. :type title: str Gets the description of this Collection. description of the collection :return: The description of this Collection. :rtype: str Sets the description of this Collection. description of the collection :param description: The description of this Collection. :type description: str Gets the keywords of this Collection. List of keywords which help to describe the collection :return: The keywords of this Collection. :rtype: List[str] Sets the keywords of this Collection. List of keywords which help to describe the collection :param keywords: The keywords of this Collection. 
:type keywords: List[str] Gets the extent of this Collection. :return: The extent of this Collection. :rtype: Extent Sets the extent of this Collection. :param extent: The extent of this Collection. :type extent: Extent Gets the data_queries of this Collection. :return: The data_queries of this Collection. :rtype: CollectionDataQueries Sets the data_queries of this Collection. :param data_queries: The data_queries of this Collection. :type data_queries: CollectionDataQueries Gets the crs of this Collection. list of the coordinate reference systems the collection results can support :return: The crs of this Collection. :rtype: List[str] Sets the crs of this Collection. list of the coordinate reference systems the collection results can support :param crs: The crs of this Collection. :type crs: List[str] Gets the output_formats of this Collection. list of formats the results can be presented in :return: The output_formats of this Collection. :rtype: List[str] Sets the output_formats of this Collection. list of formats the results can be presented in :param output_formats: The output_formats of this Collection. :type output_formats: List[str] Gets the parameter_names of this Collection. list of the data parameters available in the collection :return: The parameter_names of this Collection. :rtype: Dict[str, object] Sets the parameter_names of this Collection. list of the data parameters available in the collection :param parameter_names: The parameter_names of this Collection. :type parameter_names: Dict[str, object] | 2.385984 | 2 |
src/main.py | FranciscoCharles/clone-tetris-game | 0 | 6616578 | from os import environ
if 'PYGAME_HIDE_SUPPORT_PROMPT' not in environ:
environ['PYGAME_HIDE_SUPPORT_PROMPT'] = 'hidden'
del environ
import json
import pygame
from os.path import normpath
from image import pill_img_open_and_resize
from figure import create_list_blocks_img,Board,Piece
class Game:
    def __init__(self):
        """Create the game window and load every resource via config()."""
        self.config()

    def config(self):
        """Initialise pygame, the window, fonts, images, sounds and game state.

        Side effects: opens the display window, reads image assets from
        ./images/, reads/creates score.txt (via load_file_score) and reads
        sound.json (via open_sound_config).
        """
        pygame.init()
        pygame.mixer.init()
        pygame.font.init()
        # Fixed window size in pixels.
        self.SCREEN_W = 460
        self.SCREEN_H = 500
        pygame.display.set_caption('TetrisGame v1.1.2')
        pygame.display.set_icon(pygame.image.load('./images/icon.png'))
        self.display = pygame.display.set_mode((self.SCREEN_W,self.SCREEN_H))
        self.is_pause = False
        self.background = None
        # Current falling piece, the upcoming piece, and a snapshot used to
        # erase/restore the previous position (see move_or_rotate_piece).
        self.piece = Piece()
        self.next_piece = Piece()
        self.temp_piece = Piece()
        self.board = Board(23,12)
        self.score = 0
        self.max_score = self.load_file_score()
        # Column where every new piece spawns.
        self.START_POSITION_PIECE_X = 5
        self.pause_img = pill_img_open_and_resize('./images/pause.png', (50,50))
        # Sprite sheet sliced into individual 20x20 block images.
        self.block_pieces = create_list_blocks_img('./images/peca.png', size=(20,20))
        self.clock = pygame.time.Clock()
        font = pygame.font.SysFont('Verdana', 10)
        self.text_pause = font.render(f'Pause', True, (255,255,255))
        font = pygame.font.SysFont('Verdana', 20)
        font.set_bold(True)
        self.font = font
        self.open_sound_config()
    def draw_or_hidden_pause(self, x:int, y:int):
        """Toggle pause state and draw (or erase) the pause overlay at (x, y).

        :param x: left coordinate of the overlay area, in pixels.
        :param y: top coordinate of the overlay area, in pixels.
        """
        width, height = self.pause_img.get_size()
        text_height = self.text_pause.get_height()
        # Clear the overlay area first; if we are entering pause the icon and
        # label are redrawn below, otherwise the area simply stays black.
        pygame.draw.rect(self.display, (0), (x, y, width, height+text_height))
        self.is_pause = not self.is_pause
        if self.is_pause:
            # Horizontal offset that centres the "Pause" label under the icon.
            text_width = (self.text_pause.get_width()-width)//2
            self.display.blit(self.pause_img,(x, y))
            self.display.blit(self.text_pause, (x-text_width, y+height))
    def update_and_draw_scores(self):
        """Redraw the score / max-score panel on the right side of the window."""
        font = self.font
        # Blank the panel area before rendering the fresh values.
        pygame.draw.rect(self.display, 0, (290,160,150,120))
        text_score = font.render(f'score:', True, (255,255,255))
        self.display.blit(text_score,(290,160))
        # Scores are zero-padded to 9 digits so the panel width is stable.
        text_score = font.render(f'{self.score:09d}', True, (255,255,255))
        self.display.blit(text_score,(300,190))
        text_max_score = font.render(f'max score:', True, (255,255,255))
        self.display.blit(text_max_score,(290, 220))
        text_max_score = font.render(f'{self.max_score:09d}', True, (255,255,255))
        self.display.blit(text_max_score,(300,250))
    def draw_next_piece(self):
        """Redraw the "next piece" preview panel in the top-right corner."""
        font = self.font
        # Blank the preview area before drawing the upcoming piece.
        pygame.draw.rect(self.display, 0, (290,20,150,140))
        text = font.render(f'next piece:', True, (255,255,255))
        self.display.blit(text,(300,20))
        self.next_piece.set_pos(340,60)
        self.next_piece.draw(self.display)
    def move_or_rotate_piece(self, current_key) -> bool:
        """Apply one keyboard action to the falling piece, with collision rollback.

        'up'/'w' rotate the piece (except figure index 6 — presumably the
        square, which never needs rotating; confirm against figure.py); every
        other key is delegated to Piece.move_or_rotate.  If the move leaves
        the piece in an invalid board position it is rolled back to the
        snapshot in self.temp_piece and advanced one step; if even that
        position is invalid the piece is respawned at the start column.

        :param current_key: pygame key name ('left', 'right', 'down', ...).
        :return: True when the piece had to be respawned at the top,
                 False otherwise.
        """
        if current_key in ['up','w'] and self.piece.index_figure != 6:
            self.piece.rotate()
        else:
            self.piece.move_or_rotate(current_key)
        if not self.board.check(self.piece):
            # Invalid move: restore the last known-good position and let the
            # piece fall one step instead.
            self.piece = self.temp_piece.copy()
            self.piece.update()
            if not self.board.check(self.piece):
                self.piece.move_to(self.START_POSITION_PIECE_X, 0)
                return True
        return False
    def run_loop(self):
        """Run the main game loop until the player quits.

        The loop speed doubles as the gravity speed: FPS starts at 2 frames
        per second (one piece step per frame) and grows by FPS_INCREMENT each
        time the score passes the next velocity threshold.  Handles keyboard
        input, piece movement, line scoring, game-over/restart and drawing.
        """
        FPS = 2
        FPS_ACCUMULATOR = FPS          # persistent base speed across frames
        FPS_INCREMENT = 0.125          # speed gain per velocity threshold
        SIZE = 20                      # block size in pixels
        XMAX, YMAX = (23, 20)
        OFFSETX = SIZE                 # left margin of the board, in pixels
        KEY_PRESS_DELAY = 40           # ms a key must stay held to auto-repeat
        SCORE_TO_NEXT_VELOCITY = 80    # first score threshold that speeds up play
        SCORE_INCREMENT = 100          # gap between successive thresholds
        ticks = 0
        game = True
        key_pressed = False
        current_key = None
        self.score = 0
        # Points awarded for clearing 1..4 lines (index = number of lines).
        list_scores = [0, 10, 30, 60, 100]
        valid_keys = ['left','right','down','up','w','a','s','z']
        self.background = pygame.Surface((XMAX*SIZE,YMAX*SIZE))
        blocks = self.block_pieces
        board = self.board
        board.draw_background(self.display, (OFFSETX, 20), blocks)
        self.piece = Piece((OFFSETX, 20))
        self.piece.set_random_piece(blocks)
        self.next_piece.set_random_piece(blocks)
        self.piece.move_to(self.START_POSITION_PIECE_X,0)
        self.temp_piece = self.piece.copy()
        self.update_and_draw_scores()
        while game:
            self.clock.tick(FPS)
            # Restore the base speed; key handling below may boost this frame.
            FPS = FPS_ACCUMULATOR
            for e in pygame.event.get():
                if e.type == pygame.QUIT:
                    game = False
                    break
                elif e.type == pygame.KEYDOWN:
                    key = pygame.key.name(e.key)
                    if key in ['escape']:
                        game = False
                        break
                    elif key in valid_keys:
                        # Remember when/which key went down for auto-repeat.
                        ticks = pygame.time.get_ticks()
                        current_key = key
                        key_pressed = True
                        self.move_or_rotate_piece(key)
                    elif key in ['p', 'space']:
                        self.draw_or_hidden_pause(340,310)
                        pygame.display.flip()
                elif e.type == pygame.KEYUP:
                    key_pressed = False
                    current_key = None
            if key_pressed:
                # Holding soft-drop runs the frame fast; other held keys
                # auto-repeat once KEY_PRESS_DELAY ms have elapsed.
                if current_key in ['z','down'] and FPS_ACCUMULATOR < 30:
                    FPS = 30
                elif (pygame.time.get_ticks()-ticks) > KEY_PRESS_DELAY:
                    FPS = FPS if self.move_or_rotate_piece(current_key) else 15
            if not self.is_pause and game:
                # Erase the previous position, draw the current one, advance.
                self.temp_piece.draw(self.display, False)
                self.piece.draw(self.display)
                self.draw_next_piece()
                self.temp_piece = self.piece.copy()
                self.piece.update()
                if not board.check(self.piece):
                    # Piece landed: lock it in, spawn the next one, score lines.
                    board.add_piece(self.piece)
                    self.piece.set_piece(self.next_piece.index_figure,blocks)
                    self.next_piece.set_random_piece(blocks)
                    self.piece.move_to(self.START_POSITION_PIECE_X, 0)
                    score = board.board_score(self.display, blocks[0], (OFFSETX,20))
                    if score > 0:
                        score = list_scores[ score ]
                        self.play_score_sound()
                        self.score += score
                        self.update_and_draw_scores()
                        # Consume every threshold crossed by this score jump.
                        while self.score >= SCORE_TO_NEXT_VELOCITY:
                            SCORE_TO_NEXT_VELOCITY += SCORE_INCREMENT
                            FPS_ACCUMULATOR += FPS_INCREMENT
                    if not board.check(self.piece):
                        # Newly spawned piece collides immediately: game over.
                        pygame.mixer.music.stop()
                        game = self.show_screen_game_over()
                        if not game:
                            break
                        # Player chose to restart: reset board, speed and score.
                        pygame.mixer.music.play(-1)
                        board.reset()
                        self.update_and_draw_scores()
                        board.draw_background(self.display, (OFFSETX,20), blocks)
                        FPS = 2
                        FPS_ACCUMULATOR = 2
                        self.max_score = self.save_score_in_file()
                        self.score = 0
                        self.update_and_draw_scores()
                        continue
                board.draw(self.display, (OFFSETX,20), blocks)
            pygame.display.flip()
        self.quit_game()
    def show_screen_game_over(self):
        """Show the game-over screen until the player restarts or quits.

        Blinks the "GAME OVER" banner, shows a row of decorative pieces and
        waits for Enter (restart) or Escape / window close (quit).

        :return: True when the player wants a new game, False to quit.
        """
        # Four decorative pieces (figure indices 2..5) spread across the screen.
        list_piece = []
        for n_piece, index_figure in enumerate([2, 3, 4, 5]):
            piece = Piece((n_piece*100+60, 240))
            piece.set_piece(index_figure, self.block_pieces)
            list_piece.append(piece)
        self.play_game_over_sound()
        font = pygame.font.SysFont('Arial black', 50)
        font.set_bold(True)
        text = font.render('GAME OVER', True, (255,255,255))
        info = pygame.font.SysFont('default', 25)
        info.set_bold(True)
        text_info = info.render('press Enter to continue or ESC to quit.', True, (255,255,255))
        ticks = 0
        game = True
        next_game = False
        while game:
            # Accumulate elapsed milliseconds to drive the banner blink below.
            ticks += self.clock.tick(30)
            self.display.fill((0))
            for e in pygame.event.get():
                if e.type == pygame.QUIT:
                    game = False
                    break
                elif e.type == pygame.KEYDOWN:
                    key = pygame.key.name(e.key)
                    if key == 'escape':
                        game = False
                        break
                    elif key == 'return':
                        next_game = True
                        game = False
                        break
            if game:
                # Banner is visible only during the 300-600 ms half of the cycle.
                if 300 < ticks < 600:
                    self.display.blit(text, (40,100))
                if ticks > 600:
                    ticks = 0
                self.display.blit(text_info, (50,420))
                for piece in list_piece:
                    piece.draw(self.display)
                pygame.display.flip()
        return next_game
    def quit_game(self):
        """Shut down all pygame modules; called once the main loop exits."""
        pygame.quit()
def load_file_score(self):
try:
file = open('score.txt')
except FileNotFoundError:
with open('score.txt','w') as file:
for _ in range(5):
file.write('0\n')
file = open('score.txt')
with file:
max_score = file.readline().strip()
return int(max_score)
def save_score_in_file(self) -> int:
scores = []
with open('score.txt') as file:
for _ in range(5):
scores += [ int(file.readline().strip()) ]
with open('score.txt','w') as file:
scores += [self.score]
scores.sort(reverse=True)
scores.pop()
self.max_score = scores[0]
for score in scores:
file.writelines(f'{score}\n')
return self.max_score
def play_score_sound(self):
if self.effect is not None:
self.effect.play()
def play_game_over_sound(self):
if self.game_over is not None:
self.game_over.play()
def open_sound_config(self):
self.effect = None
self.game_over = None
try:
with open('sound.json','r') as file:
data = json.load(file)
path = data['music'].strip()
if path!="":
pygame.mixer.music.load(normpath(path))
pygame.mixer.music.play(-1)
path = data['score'].strip()
if path!="":
self.effect = pygame.mixer.Sound(normpath(path))
path = data['game-over'].strip()
if path!="":
self.game_over = pygame.mixer.Sound(normpath(path))
except:
pygame.mixer.music.stop()
self.effect = None
self.game_over = None
if __name__ == '__main__':
    # Launch the game only when executed as a script, not when imported.
    game = Game()
    game.run_loop()
| from os import environ
if 'PYGAME_HIDE_SUPPORT_PROMPT' not in environ:
environ['PYGAME_HIDE_SUPPORT_PROMPT'] = 'hidden'
del environ
import json
import pygame
from os.path import normpath
from image import pill_img_open_and_resize
from figure import create_list_blocks_img,Board,Piece
class Game:
def __init__(self):
self.config()
def config(self):
pygame.init()
pygame.mixer.init()
pygame.font.init()
self.SCREEN_W = 460
self.SCREEN_H = 500
pygame.display.set_caption('TetrisGame v1.1.2')
pygame.display.set_icon(pygame.image.load('./images/icon.png'))
self.display = pygame.display.set_mode((self.SCREEN_W,self.SCREEN_H))
self.is_pause = False
self.background = None
self.piece = Piece()
self.next_piece = Piece()
self.temp_piece = Piece()
self.board = Board(23,12)
self.score = 0
self.max_score = self.load_file_score()
self.START_POSITION_PIECE_X = 5
self.pause_img = pill_img_open_and_resize('./images/pause.png', (50,50))
self.block_pieces = create_list_blocks_img('./images/peca.png', size=(20,20))
self.clock = pygame.time.Clock()
font = pygame.font.SysFont('Verdana', 10)
self.text_pause = font.render(f'Pause', True, (255,255,255))
font = pygame.font.SysFont('Verdana', 20)
font.set_bold(True)
self.font = font
self.open_sound_config()
def draw_or_hidden_pause(self, x:int, y:int):
width, height = self.pause_img.get_size()
text_height = self.text_pause.get_height()
pygame.draw.rect(self.display, (0), (x, y, width, height+text_height))
self.is_pause = not self.is_pause
if self.is_pause:
text_width = (self.text_pause.get_width()-width)//2
self.display.blit(self.pause_img,(x, y))
self.display.blit(self.text_pause, (x-text_width, y+height))
def update_and_draw_scores(self):
font = self.font
pygame.draw.rect(self.display, 0, (290,160,150,120))
text_score = font.render(f'score:', True, (255,255,255))
self.display.blit(text_score,(290,160))
text_score = font.render(f'{self.score:09d}', True, (255,255,255))
self.display.blit(text_score,(300,190))
text_max_score = font.render(f'max score:', True, (255,255,255))
self.display.blit(text_max_score,(290, 220))
text_max_score = font.render(f'{self.max_score:09d}', True, (255,255,255))
self.display.blit(text_max_score,(300,250))
def draw_next_piece(self):
font = self.font
pygame.draw.rect(self.display, 0, (290,20,150,140))
text = font.render(f'next piece:', True, (255,255,255))
self.display.blit(text,(300,20))
self.next_piece.set_pos(340,60)
self.next_piece.draw(self.display)
def move_or_rotate_piece(self, current_key) -> bool:
if current_key in ['up','w'] and self.piece.index_figure != 6:
self.piece.rotate()
else:
self.piece.move_or_rotate(current_key)
if not self.board.check(self.piece):
self.piece = self.temp_piece.copy()
self.piece.update()
if not self.board.check(self.piece):
self.piece.move_to(self.START_POSITION_PIECE_X, 0)
return True
return False
def run_loop(self):
FPS = 2
FPS_ACCUMULATOR = FPS
FPS_INCREMENT = 0.125
SIZE = 20
XMAX, YMAX = (23, 20)
OFFSETX = SIZE
KEY_PRESS_DELAY = 40
SCORE_TO_NEXT_VELOCITY = 80
SCORE_INCREMENT = 100
ticks = 0
game = True
key_pressed = False
current_key = None
self.score = 0
list_scores = [0, 10, 30, 60, 100]
valid_keys = ['left','right','down','up','w','a','s','z']
self.background = pygame.Surface((XMAX*SIZE,YMAX*SIZE))
blocks = self.block_pieces
board = self.board
board.draw_background(self.display, (OFFSETX, 20), blocks)
self.piece = Piece((OFFSETX, 20))
self.piece.set_random_piece(blocks)
self.next_piece.set_random_piece(blocks)
self.piece.move_to(self.START_POSITION_PIECE_X,0)
self.temp_piece = self.piece.copy()
self.update_and_draw_scores()
while game:
self.clock.tick(FPS)
FPS = FPS_ACCUMULATOR
for e in pygame.event.get():
if e.type == pygame.QUIT:
game = False
break
elif e.type == pygame.KEYDOWN:
key = pygame.key.name(e.key)
if key in ['escape']:
game = False
break
elif key in valid_keys:
ticks = pygame.time.get_ticks()
current_key = key
key_pressed = True
self.move_or_rotate_piece(key)
elif key in ['p', 'space']:
self.draw_or_hidden_pause(340,310)
pygame.display.flip()
elif e.type == pygame.KEYUP:
key_pressed = False
current_key = None
if key_pressed:
if current_key in ['z','down'] and FPS_ACCUMULATOR < 30:
FPS = 30
elif (pygame.time.get_ticks()-ticks) > KEY_PRESS_DELAY:
FPS = FPS if self.move_or_rotate_piece(current_key) else 15
if not self.is_pause and game:
self.temp_piece.draw(self.display, False)
self.piece.draw(self.display)
self.draw_next_piece()
self.temp_piece = self.piece.copy()
self.piece.update()
if not board.check(self.piece):
board.add_piece(self.piece)
self.piece.set_piece(self.next_piece.index_figure,blocks)
self.next_piece.set_random_piece(blocks)
self.piece.move_to(self.START_POSITION_PIECE_X, 0)
score = board.board_score(self.display, blocks[0], (OFFSETX,20))
if score > 0:
score = list_scores[ score ]
self.play_score_sound()
self.score += score
self.update_and_draw_scores()
while self.score >= SCORE_TO_NEXT_VELOCITY:
SCORE_TO_NEXT_VELOCITY += SCORE_INCREMENT
FPS_ACCUMULATOR += FPS_INCREMENT
if not board.check(self.piece):
pygame.mixer.music.stop()
game = self.show_screen_game_over()
if not game:
break
pygame.mixer.music.play(-1)
board.reset()
self.update_and_draw_scores()
board.draw_background(self.display, (OFFSETX,20), blocks)
FPS = 2
FPS_ACCUMULATOR = 2
self.max_score = self.save_score_in_file()
self.score = 0
self.update_and_draw_scores()
continue
board.draw(self.display, (OFFSETX,20), blocks)
pygame.display.flip()
self.quit_game()
def show_screen_game_over(self):
list_piece = []
for n_piece, index_figure in enumerate([2, 3, 4, 5]):
piece = Piece((n_piece*100+60, 240))
piece.set_piece(index_figure, self.block_pieces)
list_piece.append(piece)
self.play_game_over_sound()
font = pygame.font.SysFont('Arial black', 50)
font.set_bold(True)
text = font.render('GAME OVER', True, (255,255,255))
info = pygame.font.SysFont('default', 25)
info.set_bold(True)
text_info = info.render('press Enter to continue or ESC to quit.', True, (255,255,255))
ticks = 0
game = True
next_game = False
while game:
ticks += self.clock.tick(30)
self.display.fill((0))
for e in pygame.event.get():
if e.type == pygame.QUIT:
game = False
break
elif e.type == pygame.KEYDOWN:
key = pygame.key.name(e.key)
if key == 'escape':
game = False
break
elif key == 'return':
next_game = True
game = False
break
if game:
if 300 < ticks < 600:
self.display.blit(text, (40,100))
if ticks > 600:
ticks = 0
self.display.blit(text_info, (50,420))
for piece in list_piece:
piece.draw(self.display)
pygame.display.flip()
return next_game
def quit_game(self):
pygame.quit()
def load_file_score(self):
try:
file = open('score.txt')
except FileNotFoundError:
with open('score.txt','w') as file:
for _ in range(5):
file.write('0\n')
file = open('score.txt')
with file:
max_score = file.readline().strip()
return int(max_score)
def save_score_in_file(self) -> int:
scores = []
with open('score.txt') as file:
for _ in range(5):
scores += [ int(file.readline().strip()) ]
with open('score.txt','w') as file:
scores += [self.score]
scores.sort(reverse=True)
scores.pop()
self.max_score = scores[0]
for score in scores:
file.writelines(f'{score}\n')
return self.max_score
def play_score_sound(self):
if self.effect is not None:
self.effect.play()
def play_game_over_sound(self):
if self.game_over is not None:
self.game_over.play()
def open_sound_config(self):
self.effect = None
self.game_over = None
try:
with open('sound.json','r') as file:
data = json.load(file)
path = data['music'].strip()
if path!="":
pygame.mixer.music.load(normpath(path))
pygame.mixer.music.play(-1)
path = data['score'].strip()
if path!="":
self.effect = pygame.mixer.Sound(normpath(path))
path = data['game-over'].strip()
if path!="":
self.game_over = pygame.mixer.Sound(normpath(path))
except:
pygame.mixer.music.stop()
self.effect = None
self.game_over = None
game = Game()
game.run_loop()
| none | 1 | 2.391308 | 2 | |
termsearch/settings/base.py | olivmaurel/termsearch | 0 | 6616579 | import os
from django.core.exceptions import ImproperlyConfigured
def get_env_variable(var_name):
    """Return the value of the environment variable *var_name*.

    Raises ImproperlyConfigured with a descriptive message when the
    variable is not set.
    """
    value = os.environ.get(var_name)
    if value is None:
        error_msg = "Set the {} environment variable".format(var_name)
        raise ImproperlyConfigured(error_msg)
    return value
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# need to call os.path.dirname to go up one level in the path (like ../)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
JINJA2_DIR = os.path.join(BASE_DIR, 'templates/jinja2')
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = set in local_settings.py
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'aggregator.apps.AggregatorConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'termsearch.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'DIRS': [ JINJA2_DIR,
],
'APP_DIRS': True,
'OPTIONS': {
'environment': 'termsearch.jinja2utils.environment',
},
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'termsearch.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# imported from local_settings.py
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# top level static directory
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
# ckeditor WYSIWYG stuff
MEDIA_URL = '/media/'
CKEDITOR_UPLOAD_PATH= 'uploads/'
# Logging settings
# Overwrite the default settings
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'formatters': {
'simple': {
'format': '[%(asctime)s] %(levelname)s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
'verbose': {
'format': '[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
},
'handlers': {
'development_logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, 'logs/django_dev.log'),
'filters': ['require_debug_true'],
'maxBytes': 1024 * 1024 * 15, # 15MB
'backupCount': 50,
'formatter': 'verbose',
},
'production_logfile': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false'],
'filename': os.path.join(BASE_DIR,'logs/django_production.log'),
'maxBytes': 1024 * 1024 * 15, # 15MB
'backupCount': 50,
'formatter': 'simple',
},
'dba_logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR,'logs/django_dba.log'),
'maxBytes': 1024 * 1024 * 15, # 15MB
'backupCount': 50,
'formatter': 'verbose',
},
'security_logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false'],
'filename': os.path.join(BASE_DIR, 'logs/django_security.log'),
'maxBytes': 1024 * 1024 * 15, # 15MB
'backupCount': 50,
'formatter': 'verbose',
},
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'null': {
'class': 'logging.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'aggregator': {
'handlers': ['console', 'development_logfile', 'production_logfile'],
'level': 'DEBUG',
},
'dba': {
'handlers': ['console', 'dba_logfile'],
},
'django': {
'handlers': ['console', 'development_logfile', 'production_logfile'],
},
'django.security': {
'handlers': ['console', 'security_logfile'],
'propagate': False,
},
'py.warnings': {
'handlers': ['console', 'development_logfile'],
},
}
}
| import os
from django.core.exceptions import ImproperlyConfigured
def get_env_variable(var_name):
    """Return the value of the environment variable *var_name*.

    Raises ImproperlyConfigured when the variable is not set, so a
    missing variable fails fast with a clear Django settings error.
    """
    value = os.environ.get(var_name)
    if value is None:
        error_msg = "Set the {} environment variable".format(var_name)
        raise ImproperlyConfigured(error_msg)
    return value
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# need to call os.path.dirname to go up one level in the path (like ../)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
JINJA2_DIR = os.path.join(BASE_DIR, 'templates/jinja2')
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = set in local_settings.py
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'aggregator.apps.AggregatorConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'termsearch.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'DIRS': [ JINJA2_DIR,
],
'APP_DIRS': True,
'OPTIONS': {
'environment': 'termsearch.jinja2utils.environment',
},
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'termsearch.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# imported from local_settings.py
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# top level static directory
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
# ckeditor WYSIWYG stuff
MEDIA_URL = '/media/'
CKEDITOR_UPLOAD_PATH= 'uploads/'
# Logging settings
# Overwrite the default settings
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'formatters': {
'simple': {
'format': '[%(asctime)s] %(levelname)s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
'verbose': {
'format': '[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
},
'handlers': {
'development_logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, 'logs/django_dev.log'),
'filters': ['require_debug_true'],
'maxBytes': 1024 * 1024 * 15, # 15MB
'backupCount': 50,
'formatter': 'verbose',
},
'production_logfile': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false'],
'filename': os.path.join(BASE_DIR,'logs/django_production.log'),
'maxBytes': 1024 * 1024 * 15, # 15MB
'backupCount': 50,
'formatter': 'simple',
},
'dba_logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR,'logs/django_dba.log'),
'maxBytes': 1024 * 1024 * 15, # 15MB
'backupCount': 50,
'formatter': 'verbose',
},
'security_logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false'],
'filename': os.path.join(BASE_DIR, 'logs/django_security.log'),
'maxBytes': 1024 * 1024 * 15, # 15MB
'backupCount': 50,
'formatter': 'verbose',
},
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'null': {
'class': 'logging.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'aggregator': {
'handlers': ['console', 'development_logfile', 'production_logfile'],
'level': 'DEBUG',
},
'dba': {
'handlers': ['console', 'dba_logfile'],
},
'django': {
'handlers': ['console', 'development_logfile', 'production_logfile'],
},
'django.security': {
'handlers': ['console', 'security_logfile'],
'propagate': False,
},
'py.warnings': {
'handlers': ['console', 'development_logfile'],
},
}
}
| en | 0.60384 | Get the environment variable or return exception. # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # need to call os.path.dirname to go up one level in the path (like ../) # SECURITY WARNING: don't run with debug turned on in production! # DEBUG = set in local_settings.py # Application definition # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases # imported from local_settings.py # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ # top level static directory # ckeditor WYSIWYG stuff # Logging settings # Overwrite the default settings # 15MB # 15MB # 15MB # 15MB | 2.206927 | 2 |
code/make_dope_plot.py | evijit/hci_updates | 1 | 6616580 | #!/Users/swsprec/opt/anaconda3/bin/python3
import plotly.graph_objects as go
import sys
import json
from plotly.offline import plot
from datetime import date, timedelta, datetime
fig = go.Figure()
# CLI arguments: subreddit name, and a 1/0 flag selecting tech vs react words.
subreddit = sys.argv[1]
tech_or_no = sys.argv[2]
tech_or_no = int(tech_or_no)
# Per-subreddit input files produced by earlier pipeline stages.
events_file = "articles-%s.json" % subreddit
sentiments_file = "sentiment-%s.json" % subreddit
tech_ner_file = "tech-words-%s.json" % subreddit
react_ner_file = "react-words-%s.json" % subreddit
counts_file = "total_counts-%s.json" % subreddit
clips = {}
# One JSON object per line; each loads over the previous, so only the
# last line of the file is kept.
with open("clipping_dates.json", "r") as clip_in:
    for line in clip_in.readlines():
        clips = json.loads(line)
# Parse each subreddit's clipping date string into a datetime.
for sub, datez in clips.items():
    clips[sub] = datetime.strptime(datez, "%m-%d-%Y")
earliest_date = clips[subreddit]
end_date = datetime(2019, 12, 4)
# Build one x value per day so the x-axis spans the full date range even
# where the data series have gaps.
fill_x = []
delta = end_date - earliest_date
for i in range(delta.days + 1):
    day = earliest_date + timedelta(days=i)
    fill_x.append(datetime.strftime(day, "%m-%d-%Y"))
fill_y = [0] * len(fill_x)
# All-zero trace whose only purpose is to pin the time scale.
fig.add_trace(
    go.Scatter(
        x = fill_x,
        y = fill_y,
        name = "For Time Scale",
    )
)
## graphing total post/comments
totals = {}
# One JSON object per line; only the last line of the file is kept.
with open(counts_file, "r") as count_in:
    for line in count_in.readlines():
        line = line.strip()
        obj = json.loads(line)
        totals = obj
# Running (cumulative) total of posts/comments, scaled down by 100 so it
# shares the y-axis; the hover text multiplies by 100 to show the real count.
cumulative = 0
new_y_vals = []
for y_val in totals["y"]:
    new_y_vals.append((y_val + cumulative)/100.0)
    cumulative += y_val
fig.add_trace(
    go.Scatter(
        x = totals["x"],
        y = new_y_vals,
        name = "Total Post / Comments",
        mode="lines",
        line = {"shape": "spline", "smoothing": 0.3, "color": "black", "dash": "dash"},
        hovertemplate =
        '<b>Date</b>: %{x}<br>' +
        '<b>Number</b>: %{text}',
        text = ["{}".format(int(x*100)) for x in new_y_vals]
    )
)
events = {}
# One JSON object per line; only the last line of the file is kept.
with open(events_file, "r") as event_in:
    for line in event_in.readlines():
        line = line.strip()
        obj = json.loads(line)
        events = obj
# graph notable events as markers pinned at y = -1
fig.add_trace(
    go.Scatter(
        x = events["x"],
        y = [-1] * len(events["x"]),
        hovertext = events["hovertext"],
        name = "Events",
        marker=dict(size=14, color="mediumvioletred"),
        mode="markers"
    )
)
sents = {}
# One JSON object per line; only the last line of the file is kept.
with open(sentiments_file, "r") as sent_in:
    for line in sent_in.readlines():
        line = line.strip()
        obj = json.loads(line)
        sents = obj
# graph sentiments as a filled area (axis labelled -1<->1 below)
fig.add_trace(
    go.Scatter(
        x = sents["x"],
        y = sents["y"],
        name = "Sentiment",
        fill="tozeroy",
        line = {"shape": "spline", "smoothing": 0.7, "color": "teal"}
    )
)
if tech_or_no:
    # Plot cumulative counts of the top technology-related named entities.
    top_tech = {}
    # One JSON object per line; only the last line of the file is kept.
    with open(tech_ner_file, "r") as ner_in:
        for line in ner_in.readlines():
            line = line.strip()
            obj = json.loads(line)
            top_tech = obj
    for word, coords in top_tech.items():
        cumulative = 0
        new_y_vals = []
        # Cumulative count, scaled down by 10 to share the y-axis; the
        # hover text multiplies by 10 to show the real count.
        for y_val in coords["y"]:
            new_y_vals.append((y_val + cumulative)/10.0)
            cumulative += y_val
        fig.add_trace(
            go.Scatter(
                x = coords["x"],
                y = new_y_vals,
                name = word,
                mode="lines",
                line = {"shape": "spline", "smoothing": 0.3},
                hovertemplate =
                '<b>Date</b>: %{x}<br>' +
                '<b>Number</b>: %{text}',
                text = ["{}".format(int(x*10)) for x in new_y_vals]
            )
        )
else:
    # Plot cumulative counts of the top reaction-related named entities.
    top_react = {}
    # One JSON object per line; only the last line of the file is kept.
    with open(react_ner_file, "r") as ner_in:
        for line in ner_in.readlines():
            line = line.strip()
            obj = json.loads(line)
            top_react = obj
    for word, coords in top_react.items():
        cumulative = 0
        new_y_vals = []
        for y_val in coords["y"]:
            # Bugfix: this branch did not divide by 10, unlike the tech
            # branch; the chart title says "frequencies ... is /10" and
            # the hover text computes int(x*10), so the hover numbers
            # were reported 10x too large.
            new_y_vals.append((y_val + cumulative)/10.0)
            cumulative += y_val
        fig.add_trace(
            go.Scatter(
                x = coords["x"],
                y = new_y_vals,
                name = word,
                mode="lines",
                line = {"shape": "spline", "smoothing": 0.3},
                hovertemplate =
                '<b>Date</b>: %{x}<br>' +
                '<b>Number</b>: %{text}',
                text = ["{}".format(int(x*10)) for x in new_y_vals]
            )
        )
# Label the output after the word category that was plotted.
if tech_or_no:
    t_or_r = "tech"
else:
    t_or_r = "react"
fig.update_layout(
    title="<b>Top %s words, sentiment and events for: %s</b><br>The frequencies of named"\
    " entities is /10 and the total number of posts /100" % (t_or_r, subreddit),
    xaxis_title = "Dates",
    # Typo fix: "occurances" -> "occurrences" in the user-facing label.
    yaxis_title = "-1<->1 for sentiment, # of occurrences for NER words"
)
# Write the interactive chart to an HTML file (offline mode).
plot(fig, filename='plots/dope_graph-%s-%s.html' % (subreddit, t_or_r))
#fig.show()
| #!/Users/swsprec/opt/anaconda3/bin/python3
import plotly.graph_objects as go
import sys
import json
from plotly.offline import plot
from datetime import date, timedelta, datetime
fig = go.Figure()
subreddit = sys.argv[1]
tech_or_no = sys.argv[2]
tech_or_no = int(tech_or_no)
events_file = "articles-%s.json" % subreddit
sentiments_file = "sentiment-%s.json" % subreddit
tech_ner_file = "tech-words-%s.json" % subreddit
react_ner_file = "react-words-%s.json" % subreddit
counts_file = "total_counts-%s.json" % subreddit
clips = {}
with open("clipping_dates.json", "r") as clip_in:
for line in clip_in.readlines():
clips = json.loads(line)
for sub, datez in clips.items():
clips[sub] = datetime.strptime(datez, "%m-%d-%Y")
earliest_date = clips[subreddit]
end_date = datetime(2019, 12, 4)
fill_x = []
delta = end_date - earliest_date
for i in range(delta.days + 1):
day = earliest_date + timedelta(days=i)
fill_x.append(datetime.strftime(day, "%m-%d-%Y"))
fill_y = [0] * len(fill_x)
fig.add_trace(
go.Scatter(
x = fill_x,
y = fill_y,
name = "For Time Scale",
)
)
## graphing total post/comments
totals = {}
with open(counts_file, "r") as count_in:
for line in count_in.readlines():
line = line.strip()
obj = json.loads(line)
totals = obj
cumulative = 0
new_y_vals = []
for y_val in totals["y"]:
new_y_vals.append((y_val + cumulative)/100.0)
cumulative += y_val
fig.add_trace(
go.Scatter(
x = totals["x"],
y = new_y_vals,
name = "Total Post / Comments",
mode="lines",
line = {"shape": "spline", "smoothing": 0.3, "color": "black", "dash": "dash"},
hovertemplate =
'<b>Date</b>: %{x}<br>' +
'<b>Number</b>: %{text}',
text = ["{}".format(int(x*100)) for x in new_y_vals]
)
)
events = {}
with open(events_file, "r") as event_in:
for line in event_in.readlines():
line = line.strip()
obj = json.loads(line)
events = obj
# graph
fig.add_trace(
go.Scatter(
x = events["x"],
y = [-1] * len(events["x"]),
hovertext = events["hovertext"],
name = "Events",
marker=dict(size=14, color="mediumvioletred"),
mode="markers"
)
)
sents = {}
with open(sentiments_file, "r") as sent_in:
for line in sent_in.readlines():
line = line.strip()
obj = json.loads(line)
sents = obj
# graph sentiments
fig.add_trace(
go.Scatter(
x = sents["x"],
y = sents["y"],
name = "Sentiment",
fill="tozeroy",
line = {"shape": "spline", "smoothing": 0.7, "color": "teal"}
)
)
if tech_or_no:
top_tech = {}
with open(tech_ner_file, "r") as ner_in:
for line in ner_in.readlines():
line = line.strip()
obj = json.loads(line)
top_tech = obj
for word, coords in top_tech.items():
cumulative = 0
new_y_vals = []
for y_val in coords["y"]:
new_y_vals.append((y_val + cumulative)/10.0)
cumulative += y_val
fig.add_trace(
go.Scatter(
x = coords["x"],
y = new_y_vals,
name = word,
mode="lines",
line = {"shape": "spline", "smoothing": 0.3},
hovertemplate =
'<b>Date</b>: %{x}<br>' +
'<b>Number</b>: %{text}',
text = ["{}".format(int(x*10)) for x in new_y_vals]
)
)
else:
top_react = {}
with open(react_ner_file, "r") as ner_in:
for line in ner_in.readlines():
line = line.strip()
obj = json.loads(line)
top_react = obj
for word, coords in top_react.items():
cumulative = 0
new_y_vals = []
for y_val in coords["y"]:
new_y_vals.append(y_val + cumulative)
cumulative += y_val
fig.add_trace(
go.Scatter(
x = coords["x"],
y = new_y_vals,
name = word,
mode="lines",
line = {"shape": "spline", "smoothing": 0.3},
hovertemplate =
'<b>Date</b>: %{x}<br>' +
'<b>Number</b>: %{text}',
text = ["{}".format(int(x*10)) for x in new_y_vals]
)
)
if tech_or_no:
t_or_r = "tech"
else:
t_or_r = "react"
fig.update_layout(
title="<b>Top %s words, sentiment and events for: %s</b><br>The frequencies of named"\
" entities is /10 and the total number of posts /100" % (t_or_r, subreddit),
xaxis_title = "Dates",
yaxis_title = "-1<->1 for sentiment, # of occurances for NER words"
)
plot(fig, filename='plots/dope_graph-%s-%s.html' % (subreddit, t_or_r))
#fig.show()
| en | 0.666331 | #!/Users/swsprec/opt/anaconda3/bin/python3 ## graphing total post/comments # graph # graph sentiments # of occurances for NER words" #fig.show() | 2.586731 | 3 |
lib/oltlib/fhat.py | tenddy/fhprojects | 0 | 6616581 | <reponame>tenddy/fhprojects
#!/usr/bin/env python
# coding=UTF-8
'''
@Desc: None
@Author: Teddy.tu
@Version: V1.0
@EMAIL: <EMAIL>
@License: (c)Copyright 2019-2020, Teddy.tu
@Date: 2020-01-12 21:24:26
@LastEditors: Teddy.tu
@LastEditTime: 2020-07-27 13:34:55
'''
import pandas as pd
import time
import configparser
import os
import sys
import traceback
try:
print("cwd:", os.getcwd())
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
from lib.oltlib import fhlib
from lib.public.fhlog import logger, log_decare
from lib.public import dut_connect
from lib import settings
except Exception as err:
print("添加项目路径失败.")
print(err)
MAX_CONNECT_TIMES = 3 # 最大连接次数
OLT_VERSION_5K = 'V4'
OLT_VERSION_6K = 'V5'
class FHATException(Exception):
    """Base class for all exceptions raised by this FH AT module."""
class FHATCMDError(FHATException):
    # Raised when executing a command (or operation) on the device fails.
    def __init__(self, command, error_message, stderr=''):
        # command: the CLI command / operation name that failed.
        # error_message: human-readable description (whitespace stripped).
        # stderr: optional extra detail, e.g. a formatted traceback.
        self.command = command
        self.error_message = error_message.strip()
        self.stderr = stderr
        super().__init__(self.__str__())
    def __str__(self):
        msg = 'FHATCMDError raised while executing the command:"%s"\n error_message: "%s"' % (
            self.command, self.error_message)
        if self.stderr:
            msg += '\n stderr: %s' % (self.stderr)
        # NOTE(review): __str__ has a side effect -- it logs the message
        # every time the exception is stringified (including once during
        # __init__ above). Consider moving the logging into __init__.
        logger.error(msg)
        return msg
class FH_OLT():
    """Telnet command session for a Fiberhome OLT.

    Queues CLI command lines, sends them over a telnet connection and
    keeps the transcript of the last run so that execution can be
    verified afterwards with verify_cmds_exec().
    """

    def __init__(self, version=OLT_VERSION_6K):
        self.hostip = None          # device IP address
        self.hostport = None        # telnet port of the device
        self.login_promot = dict()  # login prompt -> credential mapping
        self.version = version      # OLT generation: OLT_VERSION_5K / OLT_VERSION_6K
        # telnet connection object (None while disconnected)
        self.__tn = None
        self.connectTimes = 0
        # command lines queued for the next sendcmdlines() run
        self.__cmdlines = []
        # transcript of the last sendcmdlines() run.
        # Bugfix: verify_cmds_exec() reads self.__ret, but the attribute
        # was never created (its assignment was commented out), so every
        # call raised AttributeError.
        self.__ret = ""
        # result of the last verification: True -> commands succeeded
        self.cmd_ret = True

    def __del__(self):
        # Best effort: close the telnet session when the object dies.
        self.disconnet_olt()
        del self.__cmdlines

    def parser_fholt_logincfg(self, configfile=r'config/config.ini'):
        """Read the OLT login information from an INI configuration file.

        Args:
            configfile: INI file path, relative to the working directory.
        """
        # Bugfix: the method read the undefined attribute self.configfile;
        # use (and remember) the configfile argument instead.
        self.configfile = configfile
        parser = configparser.ConfigParser()
        parser.read(os.path.join(os.getcwd(), self.configfile))
        self.hostip = parser.get('OLT', 'ip')
        self.hostport = parser.get('OLT', 'port')
        self.login_promot = {}
        self.login_promot['Login'] = parser.get('OLT', 'username')
        self.login_promot['Password'] = parser.get('OLT', 'password')
        self.login_promot['User'] = parser.get('OLT', 'user')

    def init_olt(self):
        """Initialize login information from the global settings module."""
        self.hostip = settings.OLT_IP
        self.hostport = settings.TELNET_PORT
        self.login_promot = settings.OLT_LOGIN
        # Map the product name prefix to the CLI generation.
        if settings.OLT_VERSION.startswith("AN5"):
            self.version = OLT_VERSION_5K
        elif settings.OLT_VERSION.startswith("AN6"):
            self.version = OLT_VERSION_6K
        else:
            raise FHATCMDError('init_olt', "获取OLT版本异常.")

    def connect_olt(self, *args):
        """Connect to the OLT, retrying up to MAX_CONNECT_TIMES times.

        Exits the process when the device stays unreachable.

        Raises:
            FHATCMDError: when a telnet connection attempt fails.
        """
        connectTimes = 0
        try:
            if self.hostip is None:  # fall back to the settings module
                self.init_olt()
            while self.__tn is None and connectTimes < MAX_CONNECT_TIMES:
                connectTimes += 1
                print("Trying connect %s of %d times!" % (self.hostip, connectTimes))
                self.__tn = dut_connect.dut_connect_telnet(
                    host=self.hostip, port=self.hostport, login_promot=self.login_promot, promot='#')
            else:
                if connectTimes >= MAX_CONNECT_TIMES:
                    print("Connected to Device(%s) Timeout!" % self.hostip)
                    sys.exit(-1)
        except Exception:
            # Consistent with sendcmdlines(): carry the traceback as detail.
            raise FHATCMDError('connect_olt', "连接OLT失败.", traceback.format_exc())

    def disconnet_olt(self):
        """Close the telnet session (method-name typo kept for callers)."""
        if self.__tn is not None:
            print("Disconnect Device(%s)!" % self.hostip)
            self.__tn.close()
            self.__tn = None

    def append_cmdlines(self, *args):
        """Queue command lines; each argument may be a string, list or tuple."""
        for item in args:
            if isinstance(item, list):
                self.__cmdlines.extend(item)
            elif isinstance(item, tuple):
                self.__cmdlines.extend(list(item))
            else:
                self.__cmdlines.append(item)

    @log_decare
    def sendcmdlines(self, cmdlines=None, promot=b"#", timeout=5, delay=0):
        """Send all queued command lines to the device.

        Args:
            cmdlines: extra command line(s) to queue before sending;
                None sends only what is already queued.
            promot: byte string that marks the end of a command's output.
            timeout: seconds to wait for *promot* after each write.
            delay: seconds to pause between consecutive commands.

        Returns:
            str: concatenated output of every executed command.

        Raises:
            FHATCMDError: when connecting or sending fails.
        """
        try:
            if self.__tn is None:
                self.connect_olt()
            if cmdlines is not None:
                self.append_cmdlines(cmdlines)
            if self.version == OLT_VERSION_6K:
                # The V5 CLI needs explicit enter/leave of config mode.
                self.__cmdlines.insert(0, 'config\n')
                self.__cmdlines.append("quit\n")
            logger.debug("send command to device...")
            # Bugfix: the original returned from inside the loop after the
            # first command, so the rest of the queue was never sent and
            # time.sleep(delay) was unreachable.  Execute the whole queue
            # and return the accumulated transcript instead.
            all_rets = ''
            while len(self.__cmdlines) != 0:
                item = self.__cmdlines.pop(0)
                if len(item.strip()) == 0:
                    continue
                logger.debug("cmd:" + item)
                self.__tn.write(bytes(item, encoding='utf8'))
                cmd_rets = ''
                while True:  # keep reading until the prompt appears
                    ret = self.__tn.read_until(promot, timeout)
                    cmd_rets += ret.decode("utf-8")
                    if promot not in ret:
                        # paged output: send a space to fetch the next page
                        self.__tn.write(bytes(" ", encoding='utf8'))
                    else:
                        break
                logger.info(cmd_rets)
                all_rets += cmd_rets
                time.sleep(delay)
            # keep the transcript so verify_cmds_exec() can inspect it
            self.__ret = all_rets
            return all_rets
        except Exception:
            raise FHATCMDError("sendcmdlines", "send cmd Failed!", traceback.format_exc())

    def verify_cmds_exec(self, err_str=['failed', 'error', 'unknown command']):
        """Check whether the last sendcmdlines() run succeeded.

        Args:
            err_str: lowercase substrings whose presence in the transcript
                marks a failure.

        Returns:
            bool: True when no error marker is found (also kept in self.cmd_ret).
        """
        rets = self.__ret.lower()
        self.cmd_ret = not verify_string_match(rets, err_str)
        return self.cmd_ret

    def get_card_status(self, slotno):
        """Return the status of the line card in slot *slotno*.

        Returns:
            dict: keys CARD/EXIST/CONFIG/DETECT/DETAIL mapped to the
            columns of the card-table row for the slot.
        """
        show_card_cmd = []
        # Consistency: compare against the named version constants
        # (OLT_VERSION_5K == "V4", OLT_VERSION_6K == "V5").
        if self.version == OLT_VERSION_5K:
            show_card_cmd = fhlib.OLT_V4.show_card()
        if self.version == OLT_VERSION_6K:
            show_card_cmd = fhlib.OLT_V5.show_card()
        ret = self.sendcmdlines(show_card_cmd)
        # Skip the 4 table-header lines, then pick the row of the slot
        # (rows are assumed ordered by slot number -- TODO confirm).
        slot_status = ret.split('\n')[4:][slotno-1].strip().split()
        self.disconnet_olt()
        return dict(zip(('CARD', 'EXIST', 'CONFIG', 'DETECT', 'DETAIL'), slot_status))

    def get_card_version(self, slotno):
        # TODO: not implemented yet; currently only closes the session.
        self.disconnet_olt()

    def get_maincard_version(self, backup=False):
        """Return the build timestamp of the active (or backup) main card."""
        ret = self.sendcmdlines(fhlib.OLT_V5.show_debugversion(backup))
        # The compile time is the last 20 characters of the line just
        # before the final prompt.
        version_info = ret.split('\n')[-2].strip()[-20:]
        print("compiled:", version_info)
        self.disconnet_olt()
        return version_info
class FH_UNM():
    # Connect to the UNM network management system over TL1.
    # NOTE(review): every method below is an unimplemented placeholder.
    def __init__(self):
        pass
    def __del__(self):
        pass
    def connect_unm(self):
        # Placeholder: establish the TL1 session.
        pass
    def disconnect_unm(self):
        # Placeholder: tear down the TL1 session.
        pass
    def sendcmd_tl1(self, cmdlines, promot=b"#", timeout=5, delay=0):
        # Placeholder: send TL1 command lines and collect the response.
        pass
def get_data_excel(filename, sheets=0):
    """Load ONU information from an Excel workbook.

    Args:
        filename: path of the Excel file to read.
        sheets: sheet name or index to load (default: first sheet).

    Returns:
        pandas.DataFrame with the sheet contents.
    """
    return pd.read_excel(filename, sheet_name=sheets)
def verify_string_match(dst_str, cmp_str):
    """Return True if any string in *cmp_str* occurs as a substring of *dst_str*.

    Args:
        dst_str (str): target string to search in.
        cmp_str (Iterable[str]): candidate substrings to look for.

    Returns:
        bool: True when at least one candidate is found, else False.
    """
    # any() short-circuits on the first hit, matching the original break.
    return any(s in dst_str for s in cmp_str)
# def auth_onu_auto():
# log = Logger()
# # host = "172.16.58.3"
# host = '192.168.0.168'
# tn_obj = dut_connect.dut_connect_telnet(host, 8003)
# # a = tn_obj.read_until(b"#")
# # print(str(a, encoding='utf8'))
# dut_host = ServiceConfig(tn_obj, log)
# cmd_ret = dut_host.send_cmd(["config\n", "t l 0\n"])
# # s_cmd = cmd_ret.split('\n')
# onuid = 1
# while True:
# cmd_ret = dut_host.send_cmd(["show discovery 1/17/8\n"])
# s_cmd = cmd_ret.split('\n')
# print(len(s_cmd))
# if len(s_cmd) >= 8:
# for index in range(4, len(s_cmd)-3):
# # print(len(s_cmd), s_cmd[index])
# onu_info = s_cmd[index].split()
# if len(onu_info) == 0:
# break
# print("onu_info:", onu_info)
# print("onuid:", onuid)
# auth_str = 'whitelist add phy-id %s type %s slot 17 pon 8 onuid %d\n' % (
# onu_info[2], onu_info[1], 129-onuid)
# ret = dut_host.send_cmd([auth_str])
# if -1 == ret.find("failed") or -1 == ret.find("Unknown"):
# onuid = onuid + 1
# print("onuid:", onuid)
# time.sleep(5)
# tn_obj.close()
def upgrad_olt_batch(filename, backup=False):
    """Upgrade a batch of eight OLTs (10.182.33.182 - 10.182.33.189).

    Args:
        filename (str): program image file to load onto each OLT.
        backup (bool): when True, load the image to the backup main card.
    """
    # The upgrade command sequence is identical for every OLT; build it once.
    cmds = fhlib.OLT_V5.load_program(filename, backup=backup)
    for ip in range(8):
        fholt_obj = FH_OLT()
        # Bugfix: FH_OLT reads `hostip` (not `oltip`); assigning `oltip`
        # left hostip None, so connect_olt() fell back to settings.OLT_IP
        # and every iteration connected to the same default device.
        fholt_obj.hostip = '10.182.33.%d' % (182 + ip)
        fholt_obj.connect_olt()
        # backup_status = bool(fholt_obj.get_card_status(10)['EXIST'] == "YES")
        print("upgrade %s\n" % fholt_obj.hostip, cmds)
        fholt_obj.append_cmdlines(cmds)
        fholt_obj.sendcmdlines()
        print(fholt_obj.cmd_ret)
if __name__ == "__main__":
# debug_func()
pass
| #!/usr/bin/env python
# coding=UTF-8
'''
@Desc: None
@Author: Teddy.tu
@Version: V1.0
@EMAIL: <EMAIL>
@License: (c)Copyright 2019-2020, Teddy.tu
@Date: 2020-01-12 21:24:26
@LastEditors: Teddy.tu
@LastEditTime: 2020-07-27 13:34:55
'''
import pandas as pd
import time
import configparser
import os
import sys
import traceback
try:
print("cwd:", os.getcwd())
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
from lib.oltlib import fhlib
from lib.public.fhlog import logger, log_decare
from lib.public import dut_connect
from lib import settings
except Exception as err:
print("添加项目路径失败.")
print(err)
MAX_CONNECT_TIMES = 3 # 最大连接次数
OLT_VERSION_5K = 'V4'
OLT_VERSION_6K = 'V5'
class FHATException(Exception):
"""Base class for FHSTCException"""
class FHATCMDError(FHATException):
def __init__(self, command, error_message, stderr=''):
self.command = command
self.error_message = error_message.strip()
self.stderr = stderr
super().__init__(self.__str__())
def __str__(self):
msg = 'FHATCMDError raised while executing the command:"%s"\n error_message: "%s"' % (
self.command, self.error_message)
if self.stderr:
msg += '\n stderr: %s' % (self.stderr)
logger.error(msg)
return msg
class FH_OLT():
def __init__(self, version=OLT_VERSION_6K):
self.hostip = None # IP地址
self.hostport = None # 连接设备端口号
self.login_promot = dict() # 登录提示符
self.version = version # 版本信息
# 登录OLT telnet对象
self.__tn = None
self.connectTimes = 0
# 需要执行的命令行
self.__cmdlines = []
# 保存命令行运行log
# self.__ret = ""
# 命令执行结果, True -- 命令行执行成功; False -- 命令行执行失败
self.cmd_ret = True
def __del__(self):
self.disconnet_olt()
del self.__cmdlines
def __setattr__(self, name, value):
return super().__setattr__(name, value)
def __getattribute__(self, name):
return super().__getattribute__(name)
def parser_fholt_logincfg(self, configfile=r'config/config.ini'):
"""
解析配置文件/etc/config.ini, 获取OLT的登录信息
"""
parser = configparser.ConfigParser()
parser.read(os.path.join(os.getcwd(), self.configfile))
self.hostip = parser.get('OLT', 'ip')
self.hostport = parser.get('OLT', 'port')
self.login_promot = {}
self.login_promot['Login'] = parser.get('OLT', 'username')
self.login_promot['Password'] = parser.get('OLT', 'password')
self.login_promot['User'] = parser.get('OLT', 'user')
def init_olt(self):
"""初始化OLT的配置,获取OLT的登录信息"""
self.hostip = settings.OLT_IP
self.hostport = settings.TELNET_PORT
self.login_promot = settings.OLT_LOGIN
if settings.OLT_VERSION.startswith("AN5"):
self.version = OLT_VERSION_5K
elif settings.OLT_VERSION.startswith("AN6"):
self.version = OLT_VERSION_6K
else:
raise FHATCMDError('init_olt', "获取OLT版本异常.")
def connect_olt(self, *args):
"""
连接OLT
"""
connectTimes = 0
try:
if self.hostip is None: # 如果没有配置hostip,默认调用setting文件中OLT的配置
self.init_olt()
while self.__tn is None and connectTimes < MAX_CONNECT_TIMES:
connectTimes += 1
print("Trying connect %s of %d times!" % (self.hostip, connectTimes))
self.__tn = dut_connect.dut_connect_telnet(
host=self.hostip, port=self.hostport, login_promot=self.login_promot, promot='#')
else:
if connectTimes >= MAX_CONNECT_TIMES:
print("Connected to Device(%s) Timeout!" % self.hostip)
sys.exit(-1)
except Exception as err:
raise FHATCMDError('connect_olt', "连接OLT失败.")
def disconnet_olt(self):
"""
断开连接
"""
if self.__tn is not None:
print("Disconnect Device(%s)!" % self.hostip)
self.__tn.close()
self.__tn = None
def append_cmdlines(self, *args):
""" 添加命令行"""
for item in args:
if isinstance(item, list):
self.__cmdlines.extend(item)
elif isinstance(item, tuple):
self.__cmdlines.extend(list(item))
else:
self.__cmdlines.append(item)
@log_decare
def sendcmdlines(self, cmdlines=None, promot=b"#", timeout=5, delay=0):
"""
函数功能
下发命令到设备
函数参数:
@para cmdlines:
命令行列表, 如果为none,下发self.__cmdline中的命令
@para promot:
命令行之后完成之后提示符
@para timeout:
命令执行超时时间,单位s
@para delay:
命令执行间隔时间,单位s
"""
try:
if self.__tn is None:
self.connect_olt()
if cmdlines is not None:
self.append_cmdlines(cmdlines)
if self.version == OLT_VERSION_6K:
self.__cmdlines.insert(0, 'config\n')
self.__cmdlines.append("quit\n")
logger.debug("send command to device...")
while len(self.__cmdlines) != 0:
item = self.__cmdlines.pop(0)
if len(item.strip()) == 0:
continue
logger.debug("cmd:"+item)
self.__tn.write(bytes(item, encoding='utf8'))
cmd_rets = ''
while True: # 判断执行命令是否执行完成
ret = self.__tn.read_until(promot, timeout)
cmd_rets += ret.decode("utf-8")
if promot not in ret:
self.__tn.write(bytes(" ", encoding='utf8'))
else:
break
logger.info(cmd_rets)
return cmd_rets
time.sleep(delay)
except Exception as err:
raise FHATCMDError("sendcmdlines", "send cmd Failed!", traceback.format_exc())
def verify_cmds_exec(self, err_str=['failed', 'error', 'unknown command']):
"""
函数功能:
校验命令执行是否成功;
函数参数:
@para err_str: 字符串
校验匹配的字符串
函数返回参数: 布尔类型
检查返回的结果中是否存在err_str中的字符串;
如果不存在,命令执行成功,返回True;否则命令执行失败,返回False
"""
rets = self.__ret.lower()
self.cmd_ret = not verify_string_match(rets, err_str)
return self.cmd_ret
def get_card_status(self, slotno):
"""
获取线卡状态
"""
show_card_cmd = []
if self.version == "V4":
show_card_cmd = fhlib.OLT_V4.show_card()
if self.version == "V5":
show_card_cmd = fhlib.OLT_V5.show_card()
ret = self.sendcmdlines(show_card_cmd)
slot_status = ret.split('\n')[4:][slotno-1].strip().split()
self.disconnet_olt()
return dict(zip(('CARD', 'EXIST', 'CONFIG', 'DETECT', 'DETAIL'), slot_status))
def get_card_version(self, slotno):
self.disconnet_olt()
def get_maincard_version(self, backup=False):
ret = self.sendcmdlines(fhlib.OLT_V5.show_debugversion(backup))
version_info = ret.split('\n')[-2].strip()[-20:]
print("compiled:", version_info)
self.disconnet_olt()
return version_info
# def get_cmds_execute(self, cmds_func, *args, **kargs):
# """ 获取返回状态信息 """
# try:
# exec_cmds = cmds_func(*args, **kargs)
# ret = self.sendcmdlines(exec_cmds)
# self.disconnet_olt()
# return ret
# except Exception as err:
# raise FHATCMDError('get_cnds_execute', '获取执行命令返回状态信息失败', traceback.format_exc())
class FH_UNM():
# 通过TL1连接UNM
def __init__(self):
pass
def __del__(self):
pass
def connect_unm(self):
pass
def disconnect_unm(self):
pass
def sendcmd_tl1(self, cmdlines, promot=b"#", timeout=5, delay=0):
pass
def get_data_excel(filename, sheets=0):
"""
函数功能:通过excel文件获取ONU信息,返回格式为DataFrame
Excel文件格式为:
"""
data = pd.read_excel(filename, sheet_name=sheets)
return data
def verify_string_match(dst_str, cmp_str):
    """Return True if any string in *cmp_str* occurs as a substring of *dst_str*.

    Args:
        dst_str (str): target string to search in.
        cmp_str (Iterable[str]): candidate substrings to look for.

    Returns:
        bool: True when at least one candidate is found, else False.
    """
    # any() short-circuits on the first hit, matching the original break.
    return any(s in dst_str for s in cmp_str)
# def auth_onu_auto():
# log = Logger()
# # host = "172.16.58.3"
# host = '192.168.0.168'
# tn_obj = dut_connect.dut_connect_telnet(host, 8003)
# # a = tn_obj.read_until(b"#")
# # print(str(a, encoding='utf8'))
# dut_host = ServiceConfig(tn_obj, log)
# cmd_ret = dut_host.send_cmd(["config\n", "t l 0\n"])
# # s_cmd = cmd_ret.split('\n')
# onuid = 1
# while True:
# cmd_ret = dut_host.send_cmd(["show discovery 1/17/8\n"])
# s_cmd = cmd_ret.split('\n')
# print(len(s_cmd))
# if len(s_cmd) >= 8:
# for index in range(4, len(s_cmd)-3):
# # print(len(s_cmd), s_cmd[index])
# onu_info = s_cmd[index].split()
# if len(onu_info) == 0:
# break
# print("onu_info:", onu_info)
# print("onuid:", onuid)
# auth_str = 'whitelist add phy-id %s type %s slot 17 pon 8 onuid %d\n' % (
# onu_info[2], onu_info[1], 129-onuid)
# ret = dut_host.send_cmd([auth_str])
# if -1 == ret.find("failed") or -1 == ret.find("Unknown"):
# onuid = onuid + 1
# print("onuid:", onuid)
# time.sleep(5)
# tn_obj.close()
def upgrad_olt_batch(filename, backup=False):
    """Upgrade a batch of eight OLTs (10.182.33.182 - 10.182.33.189).

    Args:
        filename (str): program image file to load onto each OLT.
        backup (bool): when True, load the image to the backup main card.
    """
    # The upgrade command sequence is identical for every OLT; build it once.
    cmds = fhlib.OLT_V5.load_program(filename, backup=backup)
    for ip in range(8):
        fholt_obj = FH_OLT()
        # Bugfix: FH_OLT reads `hostip` (not `oltip`); assigning `oltip`
        # left hostip None, so connect_olt() fell back to settings.OLT_IP
        # and every iteration connected to the same default device.
        fholt_obj.hostip = '10.182.33.%d' % (182 + ip)
        fholt_obj.connect_olt()
        # backup_status = bool(fholt_obj.get_card_status(10)['EXIST'] == "YES")
        print("upgrade %s\n" % fholt_obj.hostip, cmds)
        fholt_obj.append_cmdlines(cmds)
        fholt_obj.sendcmdlines()
        print(fholt_obj.cmd_ret)
if __name__ == "__main__":
# debug_func()
pass | zh | 0.28421 | #!/usr/bin/env python # coding=UTF-8 @Desc: None @Author: Teddy.tu @Version: V1.0 @EMAIL: <EMAIL> @License: (c)Copyright 2019-2020, Teddy.tu @Date: 2020-01-12 21:24:26 @LastEditors: Teddy.tu @LastEditTime: 2020-07-27 13:34:55 # 最大连接次数 Base class for FHSTCException # IP地址 # 连接设备端口号 # 登录提示符 # 版本信息 # 登录OLT telnet对象 # 需要执行的命令行 # 保存命令行运行log # self.__ret = "" # 命令执行结果, True -- 命令行执行成功; False -- 命令行执行失败 解析配置文件/etc/config.ini, 获取OLT的登录信息 初始化OLT的配置,获取OLT的登录信息 连接OLT # 如果没有配置hostip,默认调用setting文件中OLT的配置 断开连接 添加命令行 函数功能 下发命令到设备 函数参数: @para cmdlines: 命令行列表, 如果为none,下发self.__cmdline中的命令 @para promot: 命令行之后完成之后提示符 @para timeout: 命令执行超时时间,单位s @para delay: 命令执行间隔时间,单位s # 判断执行命令是否执行完成 函数功能: 校验命令执行是否成功; 函数参数: @para err_str: 字符串 校验匹配的字符串 函数返回参数: 布尔类型 检查返回的结果中是否存在err_str中的字符串; 如果不存在,命令执行成功,返回True;否则命令执行失败,返回False 获取线卡状态 # def get_cmds_execute(self, cmds_func, *args, **kargs): # """ 获取返回状态信息 """ # try: # exec_cmds = cmds_func(*args, **kargs) # ret = self.sendcmdlines(exec_cmds) # self.disconnet_olt() # return ret # except Exception as err: # raise FHATCMDError('get_cnds_execute', '获取执行命令返回状态信息失败', traceback.format_exc()) # 通过TL1连接UNM 函数功能:通过excel文件获取ONU信息,返回格式为DataFrame Excel文件格式为: @函数功能: 查找目的字符串dst_str中是否存在指定的字符串(字符串列表cmp_str) @函数参数: dst_str:str cmp_str:list return: bool # def auth_onu_auto(): # log = Logger() # # host = "172.16.58.3" # host = '192.168.0.168' # tn_obj = dut_connect.dut_connect_telnet(host, 8003) # # a = tn_obj.read_until(b"#") # # print(str(a, encoding='utf8')) # dut_host = ServiceConfig(tn_obj, log) # cmd_ret = dut_host.send_cmd(["config\n", "t l 0\n"]) # # s_cmd = cmd_ret.split('\n') # onuid = 1 # while True: # cmd_ret = dut_host.send_cmd(["show discovery 1/17/8\n"]) # s_cmd = cmd_ret.split('\n') # print(len(s_cmd)) # if len(s_cmd) >= 8: # for index in range(4, len(s_cmd)-3): # # print(len(s_cmd), s_cmd[index]) # onu_info = s_cmd[index].split() # if len(onu_info) == 0: # break # print("onu_info:", onu_info) # print("onuid:", onuid) # auth_str = 
'whitelist add phy-id %s type %s slot 17 pon 8 onuid %d\n' % ( # onu_info[2], onu_info[1], 129-onuid) # ret = dut_host.send_cmd([auth_str]) # if -1 == ret.find("failed") or -1 == ret.find("Unknown"): # onuid = onuid + 1 # print("onuid:", onuid) # time.sleep(5) # tn_obj.close() # backup_status = bool(fholt_obj.get_card_status(10)['EXIST'] == "YES") # debug_func() | 2.197427 | 2 |
Scheduler/getlocation.py | ellieasona/MIRO | 0 | 6616582 | <gh_stars>0
import cv2
import imutils
def get_color_location(lower, upper, frame):
    """Locate the centroid of the largest region of *frame* whose HSV colour
    falls inside [lower, upper].

    Parameters:
        lower, upper: HSV bound tuples accepted by cv2.inRange.
        frame: BGR image (e.g. as returned by cv2.VideoCapture.read).

    Returns:
        (x, y) pixel coordinates of the blob centroid, or None when no
        matching region is found.
    """
    # Threshold in HSV space, then erode/dilate once to remove small blobs.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower, upper)
    mask = cv2.erode(mask, None, iterations=1)
    mask = cv2.dilate(mask, None, iterations=1)
    # Find the external contours of the thresholded mask.
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None
    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # Use the largest contour; its image moments give the centroid.
        c = max(cnts, key=cv2.contourArea)
        M = cv2.moments(c)
        # Guard against a zero-area contour (M["m00"] == 0), which would
        # otherwise raise ZeroDivisionError.
        if M["m00"] != 0:
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    return center
def getdotmatrix():
    """Grab a single frame from camera 1 and locate the four colour markers.

    Returns:
        (points, height, width) where *points* is the list of centroids for
        the green, purple, yellow and orange markers (None for any marker
        that was not found), and height/width are the frame dimensions.
    """
    cap = cv2.VideoCapture(1)
    _, frame = cap.read()
    height, width = frame.shape[:2]
    # HSV (lower, upper) bounds per marker colour, in detection order.
    color_bounds = [
        ((30, 72, 84), (96, 255, 255)),    # green
        ((61, 105, 81), (153, 255, 255)),  # purple
        ((22, 148, 63), (105, 255, 255)),  # yellow
        ((0, 200, 128), (24, 255, 255)),   # orange
    ]
    points = [get_color_location(lo, hi, frame) for lo, hi in color_bounds]
    return points, height, width
| import cv2
import imutils
def get_color_location(lower, upper, frame):
    """Return the (x, y) centroid of the largest blob in *frame* whose HSV
    colour lies within [lower, upper], or None when nothing matches."""
    # resize the frame, blur it, and convert it to the HSV
    # color space
    #frame = imutils.resize(frame, width=600)
    #blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    blurred = frame
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # construct a mask for the color, then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, lower, upper)
    mask = cv2.erode(mask, None, iterations=1)
    mask = cv2.dilate(mask, None, iterations=1)
    #cv2.imshow("mask", mask)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None
    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        # NOTE(review): raises ZeroDivisionError when the largest contour
        # has zero area (M["m00"] == 0) — confirm this cannot happen here.
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    # # only proceed if the radius meets a minimum size
    # if radius > 0:
    # # draw the circle and centroid on the frame,
    # # then update the list of tracked points
    # cv2.circle(frame, (int(x), int(y)), int(radius),
    # (0, 255, 255), 2)
    # cv2.circle(frame, center, 5, (0, 0, 255), -1)
    # update the points queue
    return center
def getdotmatrix():
    """Grab one frame from camera 1 and return ([green, purple, yellow,
    orange] centroids — None for colours not found — plus frame height
    and width)."""
    cap = cv2.VideoCapture(1)
    _, frame = cap.read()
    #frame = cv2.imread("miro.jpg")
    # cv2.imshow('image', frame)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    height, width = frame.shape[:2]
    #print(height, width)
    # HSV lower/upper threshold bounds for each colour marker.
    yellowlower = (22, 148, 63)
    yellowupper = (105, 255, 255)
    purplelower = (61, 105, 81)
    purpleupper = (153, 255, 255)
    orangelower = (0, 200, 128)
    orangeupper = (24, 255, 255)
    greenlower = (30, 72, 84)
    greenupper = (96, 255, 255)
    points = []
    #print("green")
    points.append(get_color_location(greenlower, greenupper, frame))
    #print("purple")
    points.append(get_color_location(purplelower, purpleupper, frame))
    #print("yellow")
    points.append(get_color_location(yellowlower, yellowupper, frame))
    #print("orange")
    points.append(get_color_location(orangelower, orangeupper, frame))
    return points, height, width
home/urls.py | jdlovins/knights381 | 0 | 6616583 | <reponame>jdlovins/knights381<gh_stars>0
from django.conf.urls import url
from . import views
# Route table for the home app (Django 1.x style url() patterns).
urlpatterns = [
    # Account management routes.
    url(r'^account/login/', views.login_user, name='account-login'),
    url(r'^account/register/', views.register_user, name='account-register'),
    url(r'^account/logout/', views.logout_user, name='account-logout'),
    url(r'^account/view', views.user_profile, name='account-profile'),
    # Storefront routes.
    url(r'^$', views.index, name='index'),
    url(r'^catalog/', views.catalog, name='catalog'),
    url(r'^contact/', views.contact, name='contact'),
    url(r'^cart/', views.cart, name='cart')
]
| from django.conf.urls import url
from . import views
# Route table for the home app (Django 1.x style url() patterns).
urlpatterns = [
    # Account management routes.
    url(r'^account/login/', views.login_user, name='account-login'),
    url(r'^account/register/', views.register_user, name='account-register'),
    url(r'^account/logout/', views.logout_user, name='account-logout'),
    url(r'^account/view', views.user_profile, name='account-profile'),
    # Storefront routes.
    url(r'^$', views.index, name='index'),
    url(r'^catalog/', views.catalog, name='catalog'),
    url(r'^contact/', views.contact, name='contact'),
    url(r'^cart/', views.cart, name='cart')
]
] | none | 1 | 1.678715 | 2 | |
maths/Cyclically rotate an array by one.py | debanjan611/Python | 0 | 6616584 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 26 10:45:27 2021
@author: <NAME>
"""
if __name__=='__main__':
    # One query per test case: read the array, rotate it right by one
    # position (the last element becomes the first).
    for _ in range(int(input())):
        A = list(map(int, input().rstrip().split()))
        # Slicing form of the rotation; A[-1] raises IndexError on an
        # empty line, matching the original manual-shift behaviour.
        A = [A[-1]] + A[:-1]
print(A) | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 26 10:45:27 2021
@author: <NAME>
"""
if __name__=='__main__':
    # One query per test case: rotate the array right by one position.
    for _ in range(int(input())):
        A = list(map(int,input().rstrip().split()))
        # Save the last element, shift every other element one slot to
        # the right, then put the saved element at the front.
        x = A[len(A)-1]
        for i in range(len(A)-1,0,-1):
            A[i] = A[i-1]
        A[0] = x
        print(A)
@author: <NAME> | 3.593274 | 4 |
src/erpbrasil/edoc/provedores/cidades.py | Engenere/erpbrasil.edoc | 8 | 6616585 | # coding=utf-8
# Copyright (C) 2019 <NAME> - KMEE
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from erpbrasil.edoc.provedores.dsf import Dsf
from erpbrasil.edoc.provedores.ginfes import Ginfes
from erpbrasil.edoc.provedores.issnet import Issnet
from erpbrasil.edoc.provedores.paulistana import Paulistana
# Map of supported IBGE city codes to the NFS-e provider class that
# implements that municipality's web service.
cidades = {
    1501402: Dsf, # Belem-PA
    2211001: Dsf, # Teresina - PI
    3132404: Ginfes, # ITAJUBA - MG
    3170206: Dsf, # Uberlândia-MG
    3303500: Dsf, # Nova Iguaçu - RJ
    3509502: Dsf, # Campinas - SP
    5002704: Dsf, # Campo Grande - MS
    3550308: Paulistana, # São Paulo - SP
    3543402: Issnet, # Ribeirão Preto - SP
    3301702: Issnet, # Duque de Caxias - RJ
}
def NFSeFactory(transmissao, ambiente, cidade_ibge, cnpj_prestador, im_prestador):
    """Instantiate the NFS-e provider registered for *cidade_ibge*.

    Raises:
        KeyError: when the IBGE city code has no registered provider
            (with a message identifying the unsupported city instead of
            the bare code).
    """
    try:
        provider = cidades[int(cidade_ibge)]
    except KeyError:
        raise KeyError(
            'Cidade IBGE %s sem provedor NFS-e cadastrado' % cidade_ibge)
    return provider(
        transmissao, ambiente, cidade_ibge, cnpj_prestador, im_prestador)
| # coding=utf-8
# Copyright (C) 2019 <NAME> - KMEE
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from erpbrasil.edoc.provedores.dsf import Dsf
from erpbrasil.edoc.provedores.ginfes import Ginfes
from erpbrasil.edoc.provedores.issnet import Issnet
from erpbrasil.edoc.provedores.paulistana import Paulistana
# Map of supported IBGE city codes to NFS-e provider implementations.
cidades = {
    1501402: Dsf, # Belem-PA
    2211001: Dsf, # Teresina - PI
    3132404: Ginfes, # ITAJUBA - MG
    3170206: Dsf, # Uberlândia-MG
    3303500: Dsf, # Nova Iguaçu - RJ
    3509502: Dsf, # Campinas - SP
    5002704: Dsf, # Campo Grande - MS
    3550308: Paulistana, # São Paulo - SP
    3543402: Issnet, # Ribeirão Preto - SP
    3301702: Issnet, # Duque de Caxias - RJ
}
def NFSeFactory(transmissao, ambiente, cidade_ibge, cnpj_prestador, im_prestador):
    """Factory: instantiate the NFS-e provider registered for *cidade_ibge*
    (raises KeyError for cities without a registered provider)."""
    return cidades[int(cidade_ibge)](
        transmissao, ambiente, cidade_ibge, cnpj_prestador, im_prestador)
| en | 0.329823 | # coding=utf-8 # Copyright (C) 2019 <NAME> - KMEE # Belem-PA # Teresina - PI # ITAJUBA - MG # Uberlândia-MG # Nova Iguaçu - RJ # Campinas - SP # Campo Grande - MS # São Paulo - SP # Ribeirão Preto - SP # Duque de Caxias - RJ Factory | 1.590131 | 2 |
bit_online_code_helper/bitonline/OnlineTestCodeManager.py | Crawler995/BITOnlineCodeHelper | 1 | 6616586 | import re
import time
from bit_online_code_helper.log.LogManager import *
class _CompileStatus(Enum):
    """Compile states of a submission; the values are the exact Chinese
    status strings searched for in the judge's result page text."""
    PENDING = '正等待编译'
    COMPILE_ERROR = '程序编译失败'
    COMPILE_SUCCESS = '程序已处理完毕'
class _OnlineTestCodeManager:
    """Submits a local source file to the lexue.bit.edu.cn online judge and
    polls the result page until a verdict (compile error / pass / fail) is
    reported through the LogManager helpers."""

    def __init__(self):
        # Browser-like User-Agent string sent with every request.
        self.__headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/77.0.3865.120 Safari/537.36 '
        }
        # Authenticated HTTP session object, injected via set_session().
        self.__session = None

    def set_session(self, session):
        """Inject the logged-in HTTP session used for every request."""
        self.__session = session

    def __get_post_data(self, name, page_text):
        # Scrape the value of the hidden <input name=...> form field.
        regx = re.compile('<input[\\s\\S]+?name=\"' + name + '\"[\\s\\S]+?value=\"(.+?)\"')
        return regx.findall(page_text)[0]

    def run(self, source_file_path, problem_url):
        """Submit *source_file_path* to *problem_url*, then report the verdict."""
        if self.__commit_online(source_file_path, problem_url):
            self.__is_commit_pass(problem_url)

    def __get_online_support_language(self, commit_page_text):
        # Returns [(value, label), ...] for every <option> of the language list.
        regx = re.compile('<option value="(\\d+)">(.+?)</option>')
        return regx.findall(commit_page_text)

    def __get_language_type(self, commit_page_text, source_file_path):
        """Map the source file extension to the judge's language option value.

        Returns '-1' when the extension is not supported by the judge.
        """
        source_file_ext_name = source_file_path.split('.')[-1]
        # e.g. [('1', 'C (GCC 3.3)'), ('2', 'C++ (G++ 3.3)')]
        online_support_language = self.__get_online_support_language(commit_page_text)
        type_map = [('c', 'C'), ('cpp', 'C++')]
        for local_support_type in type_map:
            if source_file_ext_name == local_support_type[0]:
                for online_support_type in online_support_language:
                    if local_support_type[1] in online_support_type[1]:
                        return online_support_type[0]
        return '-1'

    def __commit_online(self, source_file_path, problem_url):
        """Fill in and post the submission form; True on apparent success."""
        tip(OnlineTestCodeLogs.COMMIT_START)
        commit_page_url = problem_url.replace('view', 'submit')
        data_item = ['sesskey', '_qf__submit_form', 'sourcefile', 'submitbutton']
        data = {}
        try:
            commit_page_text = self.__session.get(commit_page_url, headers=self.__headers).text
            if '时间已到' in commit_page_text:
                # The submission deadline has passed; the form is closed.
                tip(OnlineTestCodeLogs.DEADLINE_PASS_FAILED)
                return False
            for item in data_item:
                data[item] = self.__get_post_data(item, commit_page_text)
            data['id'] = re.compile('php\\?id=(\\d+)').findall(commit_page_url)[0]
            # Close the file handle deterministically instead of leaking it.
            with open(source_file_path, 'rb') as source_file:
                data['code'] = source_file.read().decode()
            language = self.__get_language_type(commit_page_text, source_file_path)
            if language == '-1':
                tip(OnlineTestCodeLogs.NOT_SUPPORT_LANGUAGE_FAILED)
                return False
            data['language'] = language
            commit_url = 'http://lexue.bit.edu.cn/mod/programming/submit.php'
            self.__session.post(commit_url, data=data, headers=self.__headers)
            tip(OnlineTestCodeLogs.COMMIT_SUCCESS)
            divide_line()
            return True
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            tip(OnlineTestCodeLogs.COMPIT_FAILED)
            return False

    def __get_compile_status(self, test_res_page_text):
        # Falls through (returns None) when no known status string is found;
        # the caller treats that the same as "still pending".
        if _CompileStatus.PENDING.value in test_res_page_text:
            return _CompileStatus.PENDING
        elif _CompileStatus.COMPILE_ERROR.value in test_res_page_text:
            return _CompileStatus.COMPILE_ERROR
        elif _CompileStatus.COMPILE_SUCCESS.value in test_res_page_text:
            return _CompileStatus.COMPILE_SUCCESS

    def __is_commit_pass(self, problem_url):
        """Poll the result page once per second until compilation finishes,
        then report how many test cases passed."""
        test_res_url = problem_url.replace('view', 'result')
        test_res_page_text = ''
        while True:
            test_res_page_text = self.__session.get(test_res_url, headers=self.__headers).text
            compile_status = self.__get_compile_status(test_res_page_text)
            if compile_status == _CompileStatus.COMPILE_ERROR:
                tip(OnlineTestCodeLogs.COMPILE_FAILED)
                return False
            elif compile_status == _CompileStatus.COMPILE_SUCCESS:
                break
            else:
                time.sleep(1)
                continue
        total_test_case_num, \
        test_case_pass_num, \
        test_case_fail_num = self.__parse_test_res_baseinfo(test_res_page_text)
        if total_test_case_num == test_case_pass_num:
            tip(OnlineTestCodeLogs.TEST_SUCCESS)
        else:
            tip(OnlineTestCodeLogs.TEST_FAILED)
            print('通过%d个用例,失败%d个用例。' % (test_case_pass_num, test_case_fail_num))

    def __parse_test_res_baseinfo(self, test_res_page_text):
        # Parse "共 N 个测试用例 ... 通过 P 个 ... 未能通过 F 个" counts.
        test_res_baseinfo_regx = re.compile('测试结果:共 (\\d+?) 个测试用例,'
                                            '您的程序通过了其中的 (\\d+?) 个,未能通过的有 (\\d+?) 个')
        regx_res = test_res_baseinfo_regx.findall(test_res_page_text)
        total_test_case_num = int(regx_res[0][0])
        test_case_pass_num = int(regx_res[0][1])
        test_case_fail_num = int(regx_res[0][2])
        return total_test_case_num, test_case_pass_num, test_case_fail_num


# Module-level singleton used by the rest of the package.
commit_online_manager = _OnlineTestCodeManager()
| import re
import time
from bit_online_code_helper.log.LogManager import *
class _CompileStatus(Enum):
    """Compile states of a submission; the values are the exact Chinese
    status strings searched for in the judge's result page text."""
    PENDING = '正等待编译'
    COMPILE_ERROR = '程序编译失败'
    COMPILE_SUCCESS = '程序已处理完毕'
class _OnlineTestCodeManager:
    """Submits a local source file to the lexue.bit.edu.cn online judge and
    polls the result page until a verdict (compile error / pass / fail) is
    reported through the LogManager helpers."""
    def __init__(self):
        # Browser-like User-Agent string sent with every request.
        self.__headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/77.0.3865.120 Safari/537.36 '
        }
        # Authenticated HTTP session object, injected via set_session().
        self.__session = None
    def set_session(self, session):
        """Inject the logged-in HTTP session used for every request."""
        self.__session = session
    def __get_post_data(self, name, page_text):
        # Scrape the value of the hidden <input name=...> form field.
        regx = re.compile('<input[\\s\\S]+?name=\"' + name + '\"[\\s\\S]+?value=\"(.+?)\"')
        return regx.findall(page_text)[0]
    def run(self, source_file_path, problem_url):
        """Submit *source_file_path* to *problem_url*, then report the verdict."""
        if self.__commit_online(source_file_path, problem_url):
            self.__is_commit_pass(problem_url)
    def __get_online_support_language(self, commit_page_text):
        # Returns [(value, label), ...] for every <option> of the language list.
        regx = re.compile('<option value="(\\d+)">(.+?)</option>')
        return regx.findall(commit_page_text)
    def __get_language_type(self, commit_page_text, source_file_path):
        """Map the source file extension to the judge's language option value;
        returns '-1' when the extension is not supported online."""
        source_file_ext_name = source_file_path.split('.')[-1]
        # e.g. [('1', 'C (GCC 3.3)'), ('2', 'C++ (G++ 3.3)')]
        online_support_language = self.__get_online_support_language(commit_page_text)
        type_map = [('c', 'C'), ('cpp', 'C++')]
        for local_support_type in type_map:
            if source_file_ext_name == local_support_type[0]:
                for online_support_type in online_support_language:
                    if local_support_type[1] in online_support_type[1]:
                        return online_support_type[0]
        return '-1'
    def __commit_online(self, source_file_path, problem_url):
        """Fill in and post the submission form; True on apparent success."""
        tip(OnlineTestCodeLogs.COMMIT_START)
        commit_page_url = problem_url.replace('view', 'submit')
        data_item = ['sesskey', '_qf__submit_form', 'sourcefile', 'submitbutton']
        data = {}
        try:
            commit_page_text = self.__session.get(commit_page_url, headers=self.__headers).text
            if '时间已到' in commit_page_text:
                # The submission deadline has passed; the form is closed.
                tip(OnlineTestCodeLogs.DEADLINE_PASS_FAILED)
                return False
            for item in data_item:
                data[item] = self.__get_post_data(item, commit_page_text)
            # NOTE(review): the result of this call is discarded — it is
            # duplicated a few lines below.
            self.__get_language_type(commit_page_text, source_file_path)
            data['id'] = re.compile('php\\?id=(\\d+)').findall(commit_page_url)[0]
            # NOTE(review): this file handle is never closed explicitly.
            data['code'] = open(source_file_path, 'rb').read().decode()
            language = self.__get_language_type(commit_page_text, source_file_path)
            if language == '-1':
                tip(OnlineTestCodeLogs.NOT_SUPPORT_LANGUAGE_FAILED)
                return False
            data['language'] = language
            commit_url = 'http://lexue.bit.edu.cn/mod/programming/submit.php'
            self.__session.post(commit_url, data=data, headers=self.__headers)
            tip(OnlineTestCodeLogs.COMMIT_SUCCESS)
            divide_line()
            return True
        # NOTE(review): bare 'except:' also swallows KeyboardInterrupt and
        # SystemExit; 'except Exception:' would be safer.
        except:
            tip(OnlineTestCodeLogs.COMPIT_FAILED)
            return False
    def __get_compile_status(self, test_res_page_text):
        # Falls through (returns None) when no known status string is found;
        # the caller treats that the same as "still pending".
        if _CompileStatus.PENDING.value in test_res_page_text:
            return _CompileStatus.PENDING
        elif _CompileStatus.COMPILE_ERROR.value in test_res_page_text:
            return _CompileStatus.COMPILE_ERROR
        elif _CompileStatus.COMPILE_SUCCESS.value in test_res_page_text:
            return _CompileStatus.COMPILE_SUCCESS
    def __is_commit_pass(self, problem_url):
        """Poll the result page once per second until compilation finishes,
        then report how many test cases passed."""
        test_res_url = problem_url.replace('view', 'result')
        test_res_page_text = ''
        while True:
            test_res_page_text = self.__session.get(test_res_url, headers=self.__headers).text
            compile_status = self.__get_compile_status(test_res_page_text)
            if compile_status == _CompileStatus.COMPILE_ERROR:
                tip(OnlineTestCodeLogs.COMPILE_FAILED)
                return False
            elif compile_status == _CompileStatus.COMPILE_SUCCESS:
                break
            else:
                time.sleep(1)
                continue
        total_test_case_num, \
        test_case_pass_num, \
        test_case_fail_num = self.__parse_test_res_baseinfo(test_res_page_text)
        if total_test_case_num == test_case_pass_num:
            tip(OnlineTestCodeLogs.TEST_SUCCESS)
        else:
            tip(OnlineTestCodeLogs.TEST_FAILED)
            print('通过%d个用例,失败%d个用例。' % (test_case_pass_num, test_case_fail_num))
    def __parse_test_res_baseinfo(self, test_res_page_text):
        # Parse "共 N 个测试用例 ... 通过 P 个 ... 未能通过 F 个" counts.
        test_res_baseinfo_regx = re.compile('测试结果:共 (\\d+?) 个测试用例,'
                                            '您的程序通过了其中的 (\\d+?) 个,未能通过的有 (\\d+?) 个')
        regx_res = test_res_baseinfo_regx.findall(test_res_page_text)
        total_test_case_num = int(regx_res[0][0])
        test_case_pass_num = int(regx_res[0][1])
        test_case_fail_num = int(regx_res[0][2])
        return total_test_case_num, test_case_pass_num, test_case_fail_num
# Module-level singleton used by the rest of the package.
commit_online_manager = _OnlineTestCodeManager()
| en | 0.24326 | # e.g. [('1', 'C (GCC 3.3)'), ('2', 'C++ (G++ 3.3)')] | 2.279084 | 2 |
src/dapi_session.py | Refinitiv-API-Samples/Example.EikonAPI.MessengerChatBot.Python.SymbologyChatBot | 1 | 6616587 | <filename>src/dapi_session.py<gh_stars>1-10
# |-----------------------------------------------------------------------------
# | This source code is provided under the Apache 2.0 license --
# | and is provided AS IS with no warranty or guarantee of fit for purpose. --
# | See the project's LICENSE.md for details. --
# | Copyright Refinitiv 2020-2021. All rights reserved. --
# |-----------------------------------------------------------------------------
# |-----------------------------------------------------------------------------
# | Refinitiv Eikon API demo app/module to get symbology --
# |-----------------------------------------------------------------------------
# Import the required libraries for Eikon and JSON operations
import eikon as ek
import logging
import json
class DAPISessionManagement:
    """Thin wrapper around the Eikon Data API: holds the App Key, converts
    instrument symbols, and checks the desktop (Workspace/Eikon) session."""

    dapi_app_key = ''

    def __init__(self, app_key):
        """Store the App Key and register it with the Eikon library."""
        self.dapi_app_key = app_key
        ek.set_app_key(self.dapi_app_key)

    def convert_symbology(self, symbol, target_symbol_type='TR.ISIN'):
        """Convert *symbol* to *target_symbol_type* via ek.get_data.

        Supported target fields: TR.RIC, TR.ISIN, TR.SEDOL, TR.CUSIP,
        TR.LipperRICCode and TR.OrganizationID.

        Returns a (success, raw_response) pair; (False, None) on exception.
        """
        try:
            response = ek.get_data(symbol, target_symbol_type, raw_output=True)
            # get_data can report an explicit 'error' or an empty/null result.
            failed = 'error' in response or not response['data'][0][1]
            return not failed, response
        except Exception as ex:
            logging.error('Data API: get_data exception failure: %s' % ex)
            return False, None

    def verify_desktop_connection(self):
        """Return True when the API proxy port is 9000, i.e. the library is
        connected to a local Refinitiv Workspace/Eikon desktop session."""
        return int(ek.get_port_number()) == 9000
# =============================== Main Process, For verifying your Eikon Data API Access purpose ============================
if __name__ == '__main__':
    # Stand-alone smoke test for verifying Eikon Data API access.
    logging.basicConfig(format='%(asctime)s: %(levelname)s:%(name)s :%(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')
    # Authentication Variables
    _app_key = '---YOUR DATA API APPKEY---'
    """
    Input above DAPI App Key information and run this module with the following command in a console
    $>python dapi_session.py
    """
    print('Setting Eikon Data API App Key')
    dapi_session = DAPISessionManagement(_app_key)
    # Only query when connected to a local Workspace/Eikon desktop session.
    if dapi_session.verify_desktop_connection(): #if init session with Refinitiv Workspace/Eikon Desktop success
        result, response = dapi_session.convert_symbology('IBM.N','TR.ISIN')
        print(response)
# |-----------------------------------------------------------------------------
# | This source code is provided under the Apache 2.0 license --
# | and is provided AS IS with no warranty or guarantee of fit for purpose. --
# | See the project's LICENSE.md for details. --
# | Copyright Refinitiv 2020-2021. All rights reserved. --
# |-----------------------------------------------------------------------------
# |-----------------------------------------------------------------------------
# | Refinitiv Eikon API demo app/module to get symbology --
# |-----------------------------------------------------------------------------
# Import the required libraries for Eikon and JSON operations
import eikon as ek
import logging
import json
class DAPISessionManagement:
    """Wrapper for the Eikon Data API session: symbology conversion and
    desktop-connection verification."""
    dapi_app_key = ''
    # Constructor function
    def __init__(self, app_key):
        """Store the App Key and register it with the Eikon library."""
        self.dapi_app_key = app_key
        ek.set_app_key(self.dapi_app_key)
    '''
    convert symbol to targe instrument code type with ek.get_data function. The supported fields are
    - TR.RIC
    - TR.ISIN
    - TR.SEDOL
    - TR.CUSIP
    - TR.LipperRICCode
    - TR.OrganizationID
    '''
    def convert_symbology(self, symbol, target_symbol_type = 'TR.ISIN'):
        """Return (success, raw_response); (False, None) when get_data raises."""
        converted_result = True
        try:
            response = ek.get_data(symbol,target_symbol_type, raw_output = True)
            if 'error' in response or not response['data'][0][1]: # The get_data can returns both 'error' and just empty/null result
                converted_result = False
            return converted_result, response
        except Exception as ex:
            logging.error('Data API: get_data exception failure: %s' % ex)
            return False, None
    # verify if Eikon Data API is connect to Refinitiv Workspace/Eikon Desktop application
    def verify_desktop_connection(self):
        """True when the Eikon proxy port is 9000 (desktop app connected)."""
        ek_port_number = ek.get_port_number()
        if int(ek_port_number) == 9000:
            return True
        else:
            return False
# =============================== Main Process, For verifying your Eikon Data API Access purpose ============================
if __name__ == '__main__':
    # Stand-alone smoke test for verifying Eikon Data API access.
    logging.basicConfig(format='%(asctime)s: %(levelname)s:%(name)s :%(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')
    # Authentication Variables
    _app_key = '---YOUR DATA API APPKEY---'
    """
    Input above DAPI App Key information and run this module with the following command in a console
    $>python dapi_session.py
    """
    print('Setting Eikon Data API App Key')
    dapi_session = DAPISessionManagement(_app_key)
    # Only query when connected to a local Workspace/Eikon desktop session.
    if dapi_session.verify_desktop_connection(): #if init session with Refinitiv Workspace/Eikon Desktop success
        result, response = dapi_session.convert_symbology('IBM.N','TR.ISIN')
        print(response)
main.py | GwenaelGriffon/Whallet-Python2.7 | 0 | 6616588 | <filename>main.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by <NAME>
"""
import time
import json
import requests
from bittrex import Bittrex
#Getting API keys
# NOTE: this is a Python 2 script (print statements without parentheses).
json_secrets=open("secrets.json")
secrets = json.load(json_secrets)
api_key = secrets["key"]
api_secret = secrets["secret"]
#Initialization
# Parallel lists, indexed by the currency's position in the wallet.
list_currency=[]
list_balance=[]
list_available=[]
list_pending=[]
list_last=[]
list_btcvalue=[]
list_market=[]
list_percent=[]
totalworth=0
i=0
n=0
k=0
btceuro=0
btcusd=0
wallet1=Bittrex(api_key,api_secret)
data=wallet1.get_balances()
#Date
print "\n","=====",time.strftime("%A %d %B %Y %H:%M:%S"),"=====","\n"
#Requesting BTC price
# Bitstamp tickers return prices as strings; kept as str until the end.
contenteuro=requests.get("https://www.bitstamp.net/api/v2/ticker/btceur/")
contenteuro=contenteuro.json()
btceuro=contenteuro["last"]
contentusd=requests.get("https://www.bitstamp.net/api/v2/ticker/btcusd/")
contentusd=contentusd.json()
btcusd=contentusd["last"]
#BTC percentage
# Percentage change of BTC/USD relative to the session open price.
btcfloat=float(btcusd)
btcopen=contentusd["open"]
btcopen=float(btcopen)
btcpercent=(1-(btcopen/btcfloat))*100
btcpercent=float(btcpercent)
#Picking non empty currencies
while i<len(data["result"]):
    if data["result"][i]["Balance"]==0 and data["result"][i]["Pending"]==0:
        i+=1
    else:
        list_currency.append((data["result"][i]["Currency"]))
        list_market.append(("BTC-"+data["result"][i]["Currency"]))
        list_balance.append(data["result"][i]["Balance"])
        list_available.append(data["result"][i]["Available"])
        list_pending.append(data["result"][i]["Pending"])
        i+=1
amount=len(list_currency)
#Getting currency price
while n<=amount-1:
    if list_market[n]=="BTC-BTC":
        # BTC itself: store the fiat quotes instead of a BTC price.
        list_last.append(btceuro+ " eur | "+btcusd+ " usd")
        list_btcvalue.append(list_balance[n])
        list_percent.append(btcpercent)
        n+=1
    else:
        ticker=wallet1.get_ticker(list_market[n])
        summary=requests.get("https://bittrex.com/api/v1.1/public/getmarketsummary?market="+list_market[n])
        summary=summary.json()
        summary=summary["result"][0]["PrevDay"]
        summary=float(summary)
        percent=(1-(summary/ticker["result"]["Last"]))*100
        percent=float(percent)
        ticker=ticker["result"]["Last"]
        ticker=float(ticker)
        list_last.append(ticker)
        list_btcvalue.append(list_last[n]*list_balance[n])
        list_percent.append(percent)
        n+=1
#Printing wallet details
while k<=amount-1:
    totalworth+=list_btcvalue[k]
    if k==0:
        # k == 0 is BTC: list_last[k] is the preformatted fiat string.
        print list_currency[k],"(",list_last[k],"|","%+.2f" % list_percent[k],"%",")","\n","Balance : ","%.8f" % list_balance[k],"\n","Available : ","%.8f" % list_available[k],"\n","Pending : ","%.8f" % list_pending[k],"\n","Worth : ","%.8f" % list_btcvalue[k]," btc","\n","\n"
    else :
        print list_currency[k],"(","%.8f" % list_last[k],"|","%+.2f" % list_percent[k],"%",")","\n","Balance : ","%.8f" % list_balance[k],"\n","Available : ","%.8f" % list_available[k],"\n","Pending : ","%.8f" % list_pending[k],"\n","Worth : ","%.8f" % list_btcvalue[k]," btc","\n","\n"
    k+=1
btceuro=float(btceuro)
btcusd=float(btcusd)
print "Total worth : ","%.8f" % totalworth," btc", "\n"," ","%.2f" % (totalworth*btceuro), "eur", "\n"," ","%.2f" % (totalworth*btcusd), "usd", "\n"
| <filename>main.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by <NAME>
"""
import time
import json
import requests
from bittrex import Bittrex
#Getting API keys
# NOTE: this is a Python 2 script (print statements without parentheses).
json_secrets=open("secrets.json")
secrets = json.load(json_secrets)
api_key = secrets["key"]
api_secret = secrets["secret"]
#Initialization
# Parallel lists, indexed by the currency's position in the wallet.
list_currency=[]
list_balance=[]
list_available=[]
list_pending=[]
list_last=[]
list_btcvalue=[]
list_market=[]
list_percent=[]
totalworth=0
i=0
n=0
k=0
btceuro=0
btcusd=0
wallet1=Bittrex(api_key,api_secret)
data=wallet1.get_balances()
#Date
print "\n","=====",time.strftime("%A %d %B %Y %H:%M:%S"),"=====","\n"
#Requesting BTC price
# Bitstamp tickers return prices as strings; kept as str until the end.
contenteuro=requests.get("https://www.bitstamp.net/api/v2/ticker/btceur/")
contenteuro=contenteuro.json()
btceuro=contenteuro["last"]
contentusd=requests.get("https://www.bitstamp.net/api/v2/ticker/btcusd/")
contentusd=contentusd.json()
btcusd=contentusd["last"]
#BTC percentage
# Percentage change of BTC/USD relative to the session open price.
btcfloat=float(btcusd)
btcopen=contentusd["open"]
btcopen=float(btcopen)
btcpercent=(1-(btcopen/btcfloat))*100
btcpercent=float(btcpercent)
#Picking non empty currencies
while i<len(data["result"]):
    if data["result"][i]["Balance"]==0 and data["result"][i]["Pending"]==0:
        i+=1
    else:
        list_currency.append((data["result"][i]["Currency"]))
        list_market.append(("BTC-"+data["result"][i]["Currency"]))
        list_balance.append(data["result"][i]["Balance"])
        list_available.append(data["result"][i]["Available"])
        list_pending.append(data["result"][i]["Pending"])
        i+=1
amount=len(list_currency)
#Getting currency price
while n<=amount-1:
    if list_market[n]=="BTC-BTC":
        # BTC itself: store the fiat quotes instead of a BTC price.
        list_last.append(btceuro+ " eur | "+btcusd+ " usd")
        list_btcvalue.append(list_balance[n])
        list_percent.append(btcpercent)
        n+=1
    else:
        ticker=wallet1.get_ticker(list_market[n])
        summary=requests.get("https://bittrex.com/api/v1.1/public/getmarketsummary?market="+list_market[n])
        summary=summary.json()
        summary=summary["result"][0]["PrevDay"]
        summary=float(summary)
        percent=(1-(summary/ticker["result"]["Last"]))*100
        percent=float(percent)
        ticker=ticker["result"]["Last"]
        ticker=float(ticker)
        list_last.append(ticker)
        list_btcvalue.append(list_last[n]*list_balance[n])
        list_percent.append(percent)
        n+=1
#Printing wallet details
while k<=amount-1:
    totalworth+=list_btcvalue[k]
    if k==0:
        # k == 0 is BTC: list_last[k] is the preformatted fiat string.
        print list_currency[k],"(",list_last[k],"|","%+.2f" % list_percent[k],"%",")","\n","Balance : ","%.8f" % list_balance[k],"\n","Available : ","%.8f" % list_available[k],"\n","Pending : ","%.8f" % list_pending[k],"\n","Worth : ","%.8f" % list_btcvalue[k]," btc","\n","\n"
    else :
        print list_currency[k],"(","%.8f" % list_last[k],"|","%+.2f" % list_percent[k],"%",")","\n","Balance : ","%.8f" % list_balance[k],"\n","Available : ","%.8f" % list_available[k],"\n","Pending : ","%.8f" % list_pending[k],"\n","Worth : ","%.8f" % list_btcvalue[k]," btc","\n","\n"
    k+=1
btceuro=float(btceuro)
btcusd=float(btcusd)
print "Total worth : ","%.8f" % totalworth," btc", "\n"," ","%.2f" % (totalworth*btceuro), "eur", "\n"," ","%.2f" % (totalworth*btcusd), "usd", "\n"
| en | 0.734589 | #!/usr/bin/env python # -*- coding: utf-8 -*- Created by <NAME> #Getting API keys #Initialization #Date #Requesting BTC price #BTC percentage #Picking non empty currencies #Getting currency price #Printing wallet details | 2.763586 | 3 |
py3/136.py | Triple-Z/LeetCode | 1 | 6616589 | class Solution:
def singleNumber(self, nums: List[int]) -> int:
n = len(nums)
if n == 1:
return nums[0]
nums.sort()
for i in range(1, n, 2):
if nums[i] != nums[i-1]:
return nums[i-1]
# n is odd, return the last element
return nums[n-1]
| class Solution:
    def singleNumber(self, nums: List[int]) -> int:
        """Return the element that appears exactly once in *nums*, where
        every other element appears exactly twice.

        Sorts *nums* in place (mutates the caller's list), then scans
        adjacent pairs: the first mismatching pair starts at the unique
        element. O(n log n) time due to the sort.
        """
        n = len(nums)
        if n == 1:
            return nums[0]
        # After sorting, duplicates sit side by side; compare each pair.
        nums.sort()
        for i in range(1, n, 2):
            if nums[i] != nums[i-1]:
                return nums[i-1]
        # n is odd, return the last element
        return nums[n-1]
| en | 0.578769 | # n is odd, return the last element | 3.509275 | 4 |
Patent_US6362718B1_Motionless_Electromagnetic_Generator/Version_1/assembly_global_v2.py | Jay4C/Python-Macros-For_FreeCAD | 0 | 6616590 | import FreeCAD, Part, Drawing, math, Mesh
DOC = FreeCAD.activeDocument()
DOC_NAME = "assembly_global_v2"
def clear_doc():
    """Delete every object from the active FreeCAD document.

    Iterates over a snapshot (``list(...)``) of the object list:
    removing items while iterating the live collection can skip objects.
    """
    for obj in list(DOC.Objects):
        DOC.removeObject(obj.Name)
def setview():
    """Fit the whole model in the 3D view and switch to an axonometric camera."""
    # Rearrange View
    FreeCAD.Gui.SendMsgToActiveView("ViewFit")
    FreeCAD.Gui.activeDocument().activeView().viewAxometric()
# Reuse an already-open document: create a new one only when nothing is
# active; otherwise wipe the current contents so the macro can be re-run.
if DOC is None:
    FreeCAD.newDocument(DOC_NAME)
    FreeCAD.setActiveDocument(DOC_NAME)
    DOC = FreeCAD.activeDocument()
else:
    clear_doc()
# Export assembly_global_v2
__objs__ = []

def _insert_part(stl_file, obj_name, color, position, y_rotation_deg):
    """Insert one STL mesh, color and place it, and register it for export.

    FreeCAD derives ``obj_name`` from the STL file name (appending 001,
    002, ... for repeated inserts), so the expected name is passed in
    explicitly and must match the insertion order below.
    """
    Mesh.insert(stl_file, DOC_NAME)
    FreeCADGui.getDocument(DOC_NAME).getObject(obj_name).ShapeColor = color
    FreeCAD.getDocument(DOC_NAME).getObject(obj_name).Placement = App.Placement(
        App.Vector(*position), App.Rotation(App.Vector(0, 1, 0), y_rotation_deg)
    )
    __objs__.append(FreeCAD.getDocument(DOC_NAME).getObject(obj_name))

_MAGNET_STL = u"part_permanent_magnet_neodyme_n40_10mm_40mm.stl"
_OUT_COIL_STL = u"part_output_coil_without_windings_di10mm_dw14mm_de40mm_hc36mm.stl"
_IN_COIL_STL = u"part_input_coil_without_windings_di10mm_dw14mm_de40mm_hc18mm.stl"
_MAGNET_COLOR = (0.30, 0.60, 0.90)
_OUT_COIL_COLOR = (0.60, 0.60, 0.60)
_IN_COIL_COLOR = (0.60, 0.90, 0.60)

# Six permanent magnets: four upright (rotated 90 deg about Y), two flat.
_insert_part(_MAGNET_STL, "part_permanent_magnet_neodyme_n40_10mm_40mm", _MAGNET_COLOR, (40*0, 0, 0), 90)
_insert_part(_MAGNET_STL, "part_permanent_magnet_neodyme_n40_10mm_40mm001", _MAGNET_COLOR, (40*1, 0, 0), 90)
_insert_part(_MAGNET_STL, "part_permanent_magnet_neodyme_n40_10mm_40mm002", _MAGNET_COLOR, (40*0, 0, 40 + 10), 90)
_insert_part(_MAGNET_STL, "part_permanent_magnet_neodyme_n40_10mm_40mm003", _MAGNET_COLOR, (5, 0, 5), 0)
_insert_part(_MAGNET_STL, "part_permanent_magnet_neodyme_n40_10mm_40mm004", _MAGNET_COLOR, (40*2 - 5, 0, 5), 0)
_insert_part(_MAGNET_STL, "part_permanent_magnet_neodyme_n40_10mm_40mm005", _MAGNET_COLOR, (40*1, 0, 40 + 10), 90)
# Two output coils (left/right sides).
_insert_part(_OUT_COIL_STL, "part_output_coil_without_windings_di10mm_dw14mm_de40mm_hc36mm", _OUT_COIL_COLOR, (5, 0, 7), 0)
_insert_part(_OUT_COIL_STL, "part_output_coil_without_windings_di10mm_dw14mm_de40mm_hc36mm001", _OUT_COIL_COLOR, (40*2 - 5, 0, 7), 0)
# Two input coils (bottom/top, centered horizontally).
_insert_part(_IN_COIL_STL, "part_input_coil_without_windings_di10mm_dw14mm_de40mm_hc18mm", _IN_COIL_COLOR, ((40*2 - 18)/2, 0, 0), 90)
_insert_part(_IN_COIL_STL, "part_input_coil_without_windings_di10mm_dw14mm_de40mm_hc18mm001", _IN_COIL_COLOR, ((40*2 - 18)/2, 0, 40 + 10), 90)

setview()
Mesh.export(__objs__, u"assembly_global_v2.stl")
del __objs__
# Generate PNG files
file = 'assembly_global_v2_'

def _save_views(first_index):
    """Render the seven standard views to numbered PNGs.

    Returns the last image index used so the next batch can continue
    the numbering.
    """
    views = ('viewIsometric', 'viewFront', 'viewTop', 'viewRight',
             'viewRear', 'viewBottom', 'viewLeft')
    idx = first_index
    for offset, view_name in enumerate(views):
        idx = first_index + offset
        getattr(Gui.activeDocument().activeView(), view_name)()
        Gui.activeDocument().activeView().saveImage(file + str(idx) + '.png', 1117, 388, 'Current')
    return idx

# Shaded ("Ombré") renderings: images 1-7.
Gui.runCommand('Std_DrawStyle', 5)
i = _save_views(1)
# Wireframe ("Filaire") renderings: images 8-14.
Gui.runCommand('Std_DrawStyle', 2)
i = _save_views(i + 1)
| import FreeCAD, Part, Drawing, math, Mesh
DOC = FreeCAD.activeDocument()
DOC_NAME = "assembly_global_v2"
def clear_doc():
    """Delete every object from the active FreeCAD document.

    Iterates over a snapshot (``list(...)``) of the object list:
    removing items while iterating the live collection can skip objects.
    """
    for obj in list(DOC.Objects):
        DOC.removeObject(obj.Name)
def setview():
    """Fit the whole model in the 3D view and switch to an axonometric camera."""
    # Rearrange View
    FreeCAD.Gui.SendMsgToActiveView("ViewFit")
    FreeCAD.Gui.activeDocument().activeView().viewAxometric()
# Reuse an already-open document: create a new one only when nothing is
# active; otherwise wipe the current contents so the macro can be re-run.
if DOC is None:
    FreeCAD.newDocument(DOC_NAME)
    FreeCAD.setActiveDocument(DOC_NAME)
    DOC = FreeCAD.activeDocument()
else:
    clear_doc()
# Export assembly_global_v2
__objs__ = []

def _insert_part(stl_file, obj_name, color, position, y_rotation_deg):
    """Insert one STL mesh, color and place it, and register it for export.

    FreeCAD derives ``obj_name`` from the STL file name (appending 001,
    002, ... for repeated inserts), so the expected name is passed in
    explicitly and must match the insertion order below.
    """
    Mesh.insert(stl_file, DOC_NAME)
    FreeCADGui.getDocument(DOC_NAME).getObject(obj_name).ShapeColor = color
    FreeCAD.getDocument(DOC_NAME).getObject(obj_name).Placement = App.Placement(
        App.Vector(*position), App.Rotation(App.Vector(0, 1, 0), y_rotation_deg)
    )
    __objs__.append(FreeCAD.getDocument(DOC_NAME).getObject(obj_name))

_MAGNET_STL = u"part_permanent_magnet_neodyme_n40_10mm_40mm.stl"
_OUT_COIL_STL = u"part_output_coil_without_windings_di10mm_dw14mm_de40mm_hc36mm.stl"
_IN_COIL_STL = u"part_input_coil_without_windings_di10mm_dw14mm_de40mm_hc18mm.stl"
_MAGNET_COLOR = (0.30, 0.60, 0.90)
_OUT_COIL_COLOR = (0.60, 0.60, 0.60)
_IN_COIL_COLOR = (0.60, 0.90, 0.60)

# Six permanent magnets: four upright (rotated 90 deg about Y), two flat.
_insert_part(_MAGNET_STL, "part_permanent_magnet_neodyme_n40_10mm_40mm", _MAGNET_COLOR, (40*0, 0, 0), 90)
_insert_part(_MAGNET_STL, "part_permanent_magnet_neodyme_n40_10mm_40mm001", _MAGNET_COLOR, (40*1, 0, 0), 90)
_insert_part(_MAGNET_STL, "part_permanent_magnet_neodyme_n40_10mm_40mm002", _MAGNET_COLOR, (40*0, 0, 40 + 10), 90)
_insert_part(_MAGNET_STL, "part_permanent_magnet_neodyme_n40_10mm_40mm003", _MAGNET_COLOR, (5, 0, 5), 0)
_insert_part(_MAGNET_STL, "part_permanent_magnet_neodyme_n40_10mm_40mm004", _MAGNET_COLOR, (40*2 - 5, 0, 5), 0)
_insert_part(_MAGNET_STL, "part_permanent_magnet_neodyme_n40_10mm_40mm005", _MAGNET_COLOR, (40*1, 0, 40 + 10), 90)
# Two output coils (left/right sides).
_insert_part(_OUT_COIL_STL, "part_output_coil_without_windings_di10mm_dw14mm_de40mm_hc36mm", _OUT_COIL_COLOR, (5, 0, 7), 0)
_insert_part(_OUT_COIL_STL, "part_output_coil_without_windings_di10mm_dw14mm_de40mm_hc36mm001", _OUT_COIL_COLOR, (40*2 - 5, 0, 7), 0)
# Two input coils (bottom/top, centered horizontally).
_insert_part(_IN_COIL_STL, "part_input_coil_without_windings_di10mm_dw14mm_de40mm_hc18mm", _IN_COIL_COLOR, ((40*2 - 18)/2, 0, 0), 90)
_insert_part(_IN_COIL_STL, "part_input_coil_without_windings_di10mm_dw14mm_de40mm_hc18mm001", _IN_COIL_COLOR, ((40*2 - 18)/2, 0, 40 + 10), 90)

setview()
Mesh.export(__objs__, u"assembly_global_v2.stl")
del __objs__
# Generate PNG files
file = 'assembly_global_v2_'

def _save_views(first_index):
    """Render the seven standard views to numbered PNGs.

    Returns the last image index used so the next batch can continue
    the numbering.
    """
    views = ('viewIsometric', 'viewFront', 'viewTop', 'viewRight',
             'viewRear', 'viewBottom', 'viewLeft')
    idx = first_index
    for offset, view_name in enumerate(views):
        idx = first_index + offset
        getattr(Gui.activeDocument().activeView(), view_name)()
        Gui.activeDocument().activeView().saveImage(file + str(idx) + '.png', 1117, 388, 'Current')
    return idx

# Shaded ("Ombré") renderings: images 1-7.
Gui.runCommand('Std_DrawStyle', 5)
i = _save_views(1)
# Wireframe ("Filaire") renderings: images 8-14.
Gui.runCommand('Std_DrawStyle', 2)
i = _save_views(i + 1)
| en | 0.375099 | # Clear the active document deleting all the objects # Rearrange View # Export assembly_global_v2 # part_permanent_magnet_neodyme_n40_10mm_40mm - 1 # part_permanent_magnet_neodyme_n40_10mm_40mm - 2 # part_permanent_magnet_neodyme_n40_10mm_40mm - 3 # part_permanent_magnet_neodyme_n40_10mm_40mm - 4 # part_permanent_magnet_neodyme_n40_10mm_40mm - 5 # part_permanent_magnet_neodyme_n40_10mm_40mm - 6 # part_output_coil_without_windings_di10mm_dw14mm_de40mm_hc36mm - 1 # part_output_coil_without_windings_di10mm_dw14mm_de40mm_hc36mm - 2 # part_input_coil_without_windings_di10mm_dw14mm_de40mm_hc18mm - 1 # part_input_coil_without_windings_di10mm_dw14mm_de40mm_hc18mm - 2 # Generate PNG files # Ombr� # Filaire | 2.215532 | 2 |
1068.py | gabzin/uri | 3 | 6616591 | while True:
try:
string=input()
tam=len(string)
for i in range(tam):
if string[i]=='(':e+=1
elif string[i]==')':
d+=1
if e>0:
e-=1
d-=1
print("correct") if e==0 and d==0 else print("incorrect")
except EOFError:break
while True:
    # Read one line of parentheses per test case until EOF (URI 1068).
    try:
        string = input()
    except EOFError:
        break
    # BUG FIX: e and d were never initialized, so the first '(' or ')'
    # (and the final print) raised NameError. They must also restart at
    # zero for every test case.
    # e counts currently unmatched '(' ; d accumulates unmatched ')'.
    e = d = 0
    for ch in string:
        if ch == '(':
            e += 1
        elif ch == ')':
            d += 1
            if e > 0:
                # A pending '(' absorbs this ')': cancel both counters.
                e -= 1
                d -= 1
    print("correct" if e == 0 and d == 0 else "incorrect")
| none | 1 | 3.606808 | 4 | |
python/linkedlist/StackQueue.py | ulysses-sl/algorithm-implementations | 0 | 6616592 | from LinkedList import SinglyLinkedList, DoublyLinkedList
class SLStack:
    """LIFO stack backed by singly linked list nodes."""

    def __init__(self):
        self.store = None   # top node of the stack (None when empty)
        self.count = 0      # number of stored values

    def push(self, v):
        """Place ``v`` on top of the stack."""
        node = SinglyLinkedList(v)
        node.next = self.store
        self.store = node
        self.count += 1

    def pop(self):
        """Remove and return the top value, or None when empty."""
        if not self.store:
            return None
        top = self.store
        self.store = top.next
        self.count -= 1
        return top.value

    def peek(self):
        """Return the top value without removing it, or None when empty."""
        return self.store.value if self.store else None

    def total(self):
        """Number of values currently on the stack."""
        return self.count
class DLQueue:
    """FIFO queue over a doubly linked list with sentinel head/tail nodes.

    New values enter just after ``head``; values leave from just before
    ``tail``, giving first-in first-out order.
    """

    def __init__(self):
        # Sentinel nodes: real elements always live between them.
        self.head = DoublyLinkedList(None)
        self.tail = DoublyLinkedList(None)
        self.head.next = self.tail
        self.tail.prev = self.head
        self.count = 0

    def push(self, v):
        """Insert ``v`` at the head end of the queue."""
        # BUG FIX: the node was previously created as SinglyLinkedList(v),
        # relying on ``prev`` being attached dynamically; a doubly linked
        # queue should use DoublyLinkedList nodes.
        x = DoublyLinkedList(v)
        # connect x.next
        x.next = self.head.next
        self.head.next.prev = x
        # connect x.prev
        x.prev = self.head
        self.head.next = x
        self.count += 1

    def pop(self):
        """Remove and return the oldest value, or None when empty."""
        if self.count <= 0:
            return None
        v = self.tail.prev.value
        # Unlink the node that sits just before the tail sentinel.
        self.tail.prev.prev.next = self.tail
        self.tail.prev = self.tail.prev.prev
        self.count -= 1
        return v

    def peek(self):
        """Return the oldest value without removing it, or None when empty."""
        if self.count > 0:
            return self.tail.prev.value
        else:
            return None

    def total(self):
        """Number of values currently queued."""
        return self.count
class SQueue:
    """FIFO queue built from two LIFO stacks (amortized O(1) per op)."""

    def __init__(self):
        self.istack = SLStack()  # receives every incoming value
        self.ostack = SLStack()  # holds values reversed, ready to pop

    def push(self, v):
        """Enqueue ``v``."""
        self.istack.push(v)

    def pop(self):
        """Dequeue the oldest value, or None when the queue is empty."""
        # Refill the output stack only when it runs dry; the transfer
        # reverses order, turning stack LIFO into queue FIFO.
        if self.ostack.count == 0:
            while self.istack.count > 0:
                self.ostack.push(self.istack.pop())
        if self.ostack.count == 0:
            return None
        return self.ostack.pop()

    def total(self):
        """Number of values currently queued."""
        return self.istack.total() + self.ostack.total()
class QStack1:
    """LIFO stack built from two queues; push is O(n), pop is O(1).

    Invariant: after every push, q2 holds all elements newest-first and
    q1 is empty.
    """

    def __init__(self):
        self.q1 = DLQueue()
        self.q2 = DLQueue()

    def push(self, v):
        """Put ``v`` on top of the stack (linear-time rebuild)."""
        self.q1.push(v)
        # Drain the old contents behind the new element, then swap roles.
        while self.q2.total() > 0:
            self.q1.push(self.q2.pop())
        self.q1, self.q2 = self.q2, self.q1

    def pop(self):
        """Remove and return the top value, or None when empty."""
        return self.q2.pop() if self.q2.total() > 0 else None

    def total(self):
        """Number of values currently stored."""
        return self.q1.total() + self.q2.total()
class QStack2:
    """LIFO stack built from two queues; push is O(1), pop is O(n)."""

    def __init__(self):
        self.q1 = DLQueue()
        self.q2 = DLQueue()

    def push(self, v):
        """Put ``v`` on top of the stack."""
        self.q1.push(v)

    def pop(self):
        """Remove and return the most recent value, or None when empty."""
        if self.q1.count == 0:
            return None
        # Shuttle everything except the newest element into q2, take the
        # newest one, then swap the queues back into place.
        while self.q1.total() > 1:
            self.q2.push(self.q1.pop())
        newest = self.q1.pop()
        self.q1, self.q2 = self.q2, self.q1
        return newest

    def total(self):
        """Number of values currently stored."""
        return self.q1.total() + self.q2.total()
| from LinkedList import SinglyLinkedList, DoublyLinkedList
class SLStack:
    """LIFO stack backed by singly linked list nodes."""
    def __init__(self):
        # Top node of the stack; None means the stack is empty.
        self.store = None
        self.count = 0
    def push(self, v):
        # Link the new node in front of the current top.
        x = SinglyLinkedList(v)
        x.next = self.store
        self.count += 1
        self.store = x
    def pop(self):
        # Remove and return the top value; None when the stack is empty.
        if self.store:
            v = self.store.value
            self.store = self.store.next
            self.count -= 1
            return v
        else:
            return None
    def peek(self):
        # Return the top value without removing it; None when empty.
        if self.store:
            return self.store.value
        else:
            return None
    def total(self):
        # Number of values currently on the stack.
        return self.count
class DLQueue:
    """FIFO queue over a doubly linked list with sentinel head/tail nodes.

    New values enter just after ``head``; values leave from just before
    ``tail``, giving first-in first-out order.
    """

    def __init__(self):
        # Sentinel nodes: real elements always live between them.
        self.head = DoublyLinkedList(None)
        self.tail = DoublyLinkedList(None)
        self.head.next = self.tail
        self.tail.prev = self.head
        self.count = 0

    def push(self, v):
        """Insert ``v`` at the head end of the queue."""
        # BUG FIX: the node was previously created as SinglyLinkedList(v),
        # relying on ``prev`` being attached dynamically; a doubly linked
        # queue should use DoublyLinkedList nodes.
        x = DoublyLinkedList(v)
        # connect x.next
        x.next = self.head.next
        self.head.next.prev = x
        # connect x.prev
        x.prev = self.head
        self.head.next = x
        self.count += 1

    def pop(self):
        """Remove and return the oldest value, or None when empty."""
        if self.count <= 0:
            return None
        v = self.tail.prev.value
        # Unlink the node that sits just before the tail sentinel.
        self.tail.prev.prev.next = self.tail
        self.tail.prev = self.tail.prev.prev
        self.count -= 1
        return v

    def peek(self):
        """Return the oldest value without removing it, or None when empty."""
        if self.count > 0:
            return self.tail.prev.value
        else:
            return None

    def total(self):
        """Number of values currently queued."""
        return self.count
class SQueue:
    """FIFO queue built from two LIFO stacks (amortized O(1) per op)."""
    def __init__(self):
        self.istack = SLStack()
        self.ostack = SLStack()
    def push(self, v):
        # Enqueue: new values always land on the input stack.
        self.istack.push(v)
    def pop(self):
        # Dequeue the oldest value; None when the queue is empty.
        if self.ostack.count > 0:
            return self.ostack.pop()
        else:
            # Transfer reverses order, turning stack LIFO into queue FIFO.
            while self.istack.count > 0:
                self.ostack.push(self.istack.pop())
            if self.ostack.count > 0:
                return self.ostack.pop()
            else:
                return None
    def total(self):
        # Number of values currently queued.
        return self.istack.total() + self.ostack.total()
class QStack1:
    """LIFO stack from two queues; push is O(n), pop is O(1).

    Invariant: after every push, q2 holds all elements newest-first and
    q1 is empty.
    """
    def __init__(self):
        self.q1 = DLQueue()
        self.q2 = DLQueue()
    def push(self, v):
        # Rebuild: put v first, drain the old contents behind it, swap roles.
        self.q1.push(v)
        while self.q2.total() > 0:
            self.q1.push(self.q2.pop())
        self.q1, self.q2 = self.q2, self.q1
    def pop(self):
        # Remove and return the top value; None when empty.
        if self.q2.total() > 0:
            return self.q2.pop()
        else:
            return None
    def total(self):
        # Number of values currently stored.
        return self.q1.total() + self.q2.total()
class QStack2:
    """LIFO stack from two queues; push is O(1), pop is O(n)."""
    def __init__(self):
        self.q1 = DLQueue()
        self.q2 = DLQueue()
    def push(self, v):
        self.q1.push(v)
    def pop(self):
        # Remove and return the most recent value; None when empty.
        if self.q1.count == 0:
            return None
        # Shuttle all but the newest element into q2, take the newest,
        # then swap the queues back into place.
        while self.q1.total() > 1:
            self.q2.push(self.q1.pop())
        v = self.q1.pop()
        self.q1, self.q2 = self.q2, self.q1
        return v
    def total(self):
        # Number of values currently stored.
        return self.q1.total() + self.q2.total()
| en | 0.483947 | # connect x.next # connect x.prev | 3.583594 | 4 |
src/pyqreg/quantile_regression.py | mozjay0619/pyqreg | 0 | 6616593 | <filename>src/pyqreg/quantile_regression.py<gh_stars>0
import numpy as np
import scipy.stats as stats
from numpy.linalg import pinv
from scipy.stats import norm, t
from .c.blas_lapack import lapack_cholesky_inv
from .c.cluster_cov import psi_function
from .c.fit_coefs import fit_coefs
from .c.matrix_opaccum import matrix_opaccum
from .c.stats import invnormal, normalden
from .utils import rng_generator
class QuantReg:
def __init__(self, y, X):
if not X.flags["F_CONTIGUOUS"]:
X = np.array(X, np.double, copy=False, order="F", ndmin=1)
if not y.flags["F_CONTIGUOUS"]:
y = np.array(y, np.double, copy=False, order="F", ndmin=1)
self.y = y
self.X = X
def fit(
self,
q,
cov_type="robust",
fit_method=None,
seed=None,
eps=1e-6,
Mm_factor=0.8,
max_bad_fixup=3,
kappa_eps=1e-6,
kernel="epa",
bandwidth="hsheather",
cov_kwds=dict(),
):
"""Solve by interior point method (Mehrotra's predictor corrector
algorithm). If n >= 100,000, it will use preprocessing step following
Portnoy and Koenker (1997).
Parameters
----------
q : double
Quantile value strictly between 0 and 1
fit_method : str or None. Default None.
Coefficient estimation method.
- None : uses ipm if n < 100000, else, preproc-ipm.
- ipm : interior point method.
- preproc-ipm : interior point method with preprocessing.
cov_type : str. Default 'robust'.
Type of covariance estimator to use. Available types are
``iid`` for iid errors, ``robust`` for heteroskedastic errors,
and ``cluster`` for clustered errors.
seed : int or None
Random seed to use if preproc-ipm is used for subsampling.
kernel : str, kernel to use in the kernel density estimation for the
asymptotic covariance matrix:
- epa: Epanechnikov
- cos: Cosine
- gau: Gaussian
- par: Parzene
bandwidth : str, Bandwidth selection method in kernel density
estimation for asymptotic covariance estimate (full
references in QuantReg docstring):
- hsheather: Hall-Sheather (1988)
- bofinger: Bofinger (1975)
- chamberlain: Chamberlain (1994)
cov_kwds : dict
Additional keywords used in the covariance specification.
- groups : ndarray int type
Integer-valued index of clusters or groups. Required if using
the ``cluster`` cov_type.
- kappa_type : str. Default 'silverman'.
The scaling factor for the bandwidth. Available rule of thumbs
type are ``silverman`` and ``median``.
"""
n = len(self.X)
if fit_method is None:
if n >= 100000:
rng = rng_generator(seed)
self.params = self.fit_preproc_ipm(
q, rng, Mm_factor, max_bad_fixup, kappa_eps
)
else:
self.params = self.fit_ipm(q, eps)
elif fit_method == "ipm":
self.params = self.fit_ipm(q, eps)
elif fit_method == "preproc-ipm":
rng = rng_generator(seed)
self.params = self.fit_preproc_ipm(
q, rng, Mm_factor, max_bad_fixup, kappa_eps
)
# Estimate covariance matrix
if cov_type == "cluster":
if "groups" not in cov_kwds:
raise ValueError(
'You must provide "groups" keyword value in cov_kwds if data is clustered'
)
else:
groups = cov_kwds["groups"]
if not np.issubdtype(groups.dtype, np.integer):
raise TypeError(
"groups array must be integer type. Instead it is {}.".format(
groups.dtype
)
)
groups = groups.astype(np.int32)
if "kappa_type" not in cov_kwds:
kappa_type = "silverman"
else:
kappa_type = cov_kwds["kappa_type"]
self.vcov = self.cluster_cov(groups, self.params, q, kappa_type)
self.bse = np.sqrt(np.diag(self.vcov))
elif cov_type == "robust":
self.vcov = self.iid_robust_cov(
self.params, q, kernel, bandwidth, vcov="robust"
)
self.bse = np.sqrt(np.diag(self.vcov))
elif cov_type == "iid":
self.vcov = self.iid_robust_cov(
self.params, q, kernel, bandwidth, vcov="iid"
)
self.bse = np.sqrt(np.diag(self.vcov))
else:
cov_type_names = ["iid", "robust", "cluster"]
raise Exception("cov_type must be one of " + ", ".join(cov_type_names))
# Compute two-sided p-values.
self.tvalues = self.params / self.bse
self.pvalues = np.empty(len(self.tvalues))
for i, z in enumerate(np.abs(self.tvalues)):
self.pvalues[i] = (
1 - t.cdf(x=z, loc=0, scale=1, df=n - self.X.shape[1])
) * 2
self.nobs = n
return self
def conf_int(self, alpha=0.05):
"""Compute the confidence intervals.
Parameters
----------
alpha : float
"""
self.upb = (
self.params
+ t.ppf(q=1 - alpha / 2.0, df=self.nobs - self.X.shape[1]) * self.bse
)
self.lob = (
self.params
- t.ppf(q=1 - alpha / 2.0, df=self.nobs - self.X.shape[1]) * self.bse
)
return np.squeeze(np.dstack([self.lob, self.upb]))
    def fit_ipm(self, q, eps=1e-6):
        """Estimate coefficients using the interior point method.

        Parameters
        ----------
        q : double
            Quantile value strictly between 0 and 1
        eps : double
            Duality gap stopping criterion
        """
        # NOTE(review): ``_fit_coefs`` is not defined in this excerpt --
        # presumably a module-level wrapper around the Cython ``fit_coefs``
        # defined later in this file; confirm.
        coefs = _fit_coefs(self.X, self.y, q, eps)
        return coefs
    def fit_preproc_ipm(
        self, q, rng, eps=1e-6, Mm_factor=0.8, max_bad_fixup=3, kappa_eps=1e-6
    ):
        """Preprocessing phase as described in Portnoy and Koenker,
        Statistical Science, (1997) 279-300.
        Python implementation of the R code "rq.fit.pfn".
        As was cautioned, use only when the problem size is very large. The
        recommended size of n according to the original author is > 100,000.

        Parameters
        ----------
        q : double
            Quantile value strictly between 0 and 1.
        rng : numpy Generator
            Random source used to draw the initial subsample.
        eps : double
            Duality gap stopping criterion passed to the IPM solver.
        Mm_factor : double
            Multiplier on the subsample size m defining the residual band M.
        max_bad_fixup : int
            Maximum number of sign-violation repair passes before resampling.
        kappa_eps : double
            Floor applied to the confidence band to avoid division by zero.
        """
        X = self.X
        y = self.y
        n, p = X.shape
        # Initial subsample size ~ ((p+1) n)^(2/3), per the paper.
        m = int(((p + 1) * n) ** (2 / 3))
        not_optimal = True
        while not_optimal:
            if m < n:
                s = rng.choice(n, m, replace=False)
            else:
                # Subsample grew past n: fall back to a full solve.
                return _fit_coefs(X, y, q, eps)
            xx = X[s]
            yy = y[s]
            xx = np.array(xx, np.double, copy=False, order="F", ndmin=1)
            yy = np.array(yy, np.double, copy=False, order="F", ndmin=1)
            # Preliminary fit on the subsample.
            first_coefs = _fit_coefs(xx, yy, q, eps)
            # Confidence band width for each observation's residual.
            xxinv = pinv(xx.T @ xx)
            band = np.sqrt(((X @ xxinv) ** 2) @ np.ones(p))
            r = y - X @ first_coefs
            M = Mm_factor * m
            lo_q = max(1 / n, q - M / (2 * n))
            hi_q = min(q + M / (2 * n), (n - 1) / n)
            kappa = np.quantile(r / np.maximum(kappa_eps, band), [lo_q, hi_q])
            # Observations confidently below (sl) / above (su) the fit.
            sl = r < band * kappa[0]
            su = r > band * kappa[1]
            bad_fixup = 0
            while not_optimal & (bad_fixup < max_bad_fixup):
                # Keep the undecided observations; collapse each decided
                # side into a single aggregated "globbed" observation.
                xx = X[~su & ~sl]
                yy = y[~su & ~sl]
                if any(sl):
                    glob_x = X[sl].T @ np.ones(np.sum(sl))
                    # Notes:
                    # 1. The resulting matrix is transposed one more time because np.ones is 1 dimensional.
                    # 2. Summing data with same residual signs will not change the residual sign of the summed.
                    glob_y = np.sum(y[sl])
                    xx = np.vstack([xx, glob_x])
                    yy = np.r_[yy, glob_y]
                if any(su):
                    ghib_x = X[su].T @ np.ones(np.sum(su))
                    ghib_y = np.sum(y[su])
                    xx = np.vstack([xx, ghib_x])
                    yy = np.r_[yy, ghib_y]
                xx = np.array(xx, np.double, copy=False, order="F", ndmin=1)
                yy = np.array(yy, np.double, copy=False, order="F", ndmin=1)
                coefs = _fit_coefs(xx, yy, q, eps)
                r = y - X @ coefs
                # Observations whose predicted residual sign was wrong.
                su_bad = (r < 0) & su
                sl_bad = (r > 0) & sl
                if any(np.r_[su_bad, sl_bad]):
                    if np.sum(sl_bad) + np.sum(su_bad) > 0.1 * M:
                        # Too many violations: double the subsample and restart.
                        m = 2 * m
                        break
                    # Few violations: move them back to the undecided set.
                    su = su & ~su_bad
                    sl = sl & ~sl_bad
                    bad_fixup = bad_fixup + 1
                else:
                    not_optimal = False
        return coefs
    def cluster_cov(self, groups, beta, q, kappa_type="silverman"):
        """Covariance matrix estimator as proposed by <NAME> Silva (2013).
        Translated from Stata code of qreg2.

        NOTE(review): this method permanently re-sorts ``self.X`` and
        ``self.y`` by group (see the ``sort_args`` assignments below) --
        a side effect callers should be aware of.

        Parameters
        ----------
        groups : ndarray
            The group index array.
        beta : ndarray
            The estimated parameter values.
        q : double
            The quantile strictly between 0 and 1.
        kappa_type : str. Default 'silverman'.
            The scaling factor for the bandwidth. Available rule of thumbs
            type are ``silverman`` and ``median``.
        """
        theta = q
        n = len(self.X)
        # Stable sort so observations of each cluster are contiguous, as
        # required by the C accumulator below.
        sort_args = groups.argsort(kind="mergesort")
        self.X = self.X[sort_args]
        self.y = self.y[sort_args]
        groups = groups[sort_args]
        self.X = np.array(self.X, np.double, copy=False, order="F", ndmin=1)
        self.y = np.array(self.y, np.double, copy=False, order="F", ndmin=1)
        groups = np.array(groups, np.int32, copy=False, order="F", ndmin=1)
        G = len(np.unique(groups))
        # Compute residuals
        resid = self.y - self.X @ beta
        # Compute A
        # psi
        psi_resid = psi_function(resid, theta)
        A = matrix_opaccum(self.X, groups, psi_resid, G)
        # Compute B
        # fmt: off
        # h_nG: Hall-Sheather style bandwidth in n^(-1/3).
        h_nG = (invnormal(0.975)**(2/3)) * \
            ((1.5 * ((normalden(invnormal(theta)))**2) / (2 * ((invnormal(theta))**2) + 1))**(1/3)) * \
            (n)**(-1/3)
        # fmt: on
        # kappa: robust residual scale used to set the band width.
        if kappa_type == "median":
            k = np.median(np.abs(resid))
        elif kappa_type == "silverman":
            k = min(
                np.std(resid),
                (np.percentile(resid, 75) - np.percentile(resid, 25)) / 1.34,
            )
        else:
            raise ValueError(
                "Incorrect kappa_type {}. Please choose between median and silverman".format(
                    kappa_type
                )
            )
        # c^_G
        chat_G = k * (invnormal(theta + h_nG) - invnormal(theta - h_nG))
        # B weights: uniform-kernel density estimate at each residual.
        dens = np.sqrt((np.abs(resid) < chat_G).astype(np.float64) / (2 * chat_G))
        # One "group" per observation so B accumulates over all rows.
        _groups = np.arange(len(groups)).astype(np.int32)
        B = matrix_opaccum(self.X, _groups, dens, n)
        # Compute Binv A Binv (sandwich); Cholesky inversion happens in place.
        B = np.array(B, np.double, copy=False, order="F", ndmin=1)
        lapack_cholesky_inv(B)
        return B @ A @ B
def iid_robust_cov(self, beta, q, kernel, bandwidth, vcov="robust"):
"""Covariance matrix estimation for iid data as written in the statsmodels:
https://www.statsmodels.org/stable/_modules/statsmodels/regression/quantile_regression.html#QuantReg
Parameters
----------
kernel : str, kernel to use in the kernel density estimation for the
asymptotic covariance matrix:
- epa: Epanechnikov
- cos: Cosine
- gau: Gaussian
- par: Parzene
bandwidth : str, Bandwidth selection method in kernel density
estimation for asymptotic covariance estimate (full
references in QuantReg docstring):
- hsheather: Hall-Sheather (1988)
- bofinger: Bofinger (1975)
- chamberlain: Chamberlain (1994)
"""
kern_names = ["biw", "cos", "epa", "gau", "par"]
if kernel not in kern_names:
raise Exception("kernel must be one of " + ", ".join(kern_names))
else:
kernel = kernels[kernel]
if bandwidth == "hsheather":
bandwidth = hall_sheather
elif bandwidth == "bofinger":
bandwidth = bofinger
elif bandwidth == "chamberlain":
bandwidth = chamberlain
else:
raise Exception(
"bandwidth must be in 'hsheather', 'bofinger', 'chamberlain'"
)
# Compute residuals
resid = self.y - self.X @ beta
nobs = len(self.X)
iqre = stats.scoreatpercentile(resid, 75) - stats.scoreatpercentile(resid, 25)
h = bandwidth(nobs, q)
h = min(np.std(self.y), iqre / 1.34) * (norm.ppf(q + h) - norm.ppf(q - h))
fhat0 = 1.0 / (nobs * h) * np.sum(kernel(resid / h))
if vcov == "robust":
d = np.where(resid > 0, (q / fhat0) ** 2, ((1 - q) / fhat0) ** 2)
xtxi = pinv(np.dot(self.X.T, self.X))
xtdx = np.dot(self.X.T * d[np.newaxis, :], self.X)
vcov = xtxi @ xtdx @ xtxi
elif vcov == "iid":
vcov = (1.0 / fhat0) ** 2 * q * (1 - q) * pinv(np.dot(self.X.T, self.X))
return vcov
# fmt: off
# From https://www.statsmodels.org/stable/_modules/statsmodels/regression/quantile_regression.html#QuantReg.
def _parzen(u):
z = np.where(np.abs(u) <= .5, 4./3 - 8. * u**2 + 8. * np.abs(u)**3,
8. * (1 - np.abs(u))**3 / 3.)
z[np.abs(u) > 1] = 0
return z
kernels = {}
kernels['biw'] = lambda u: 15. / 16 * (1 - u**2)**2 * np.where(np.abs(u) <= 1, 1, 0)
kernels['cos'] = lambda u: np.where(np.abs(u) <= .5, 1 + np.cos(2 * np.pi * u), 0)
kernels['epa'] = lambda u: 3. / 4 * (1-u**2) * np.where(np.abs(u) <= 1, 1, 0)
kernels['par'] = _parzen
def hall_sheather(n, q, alpha=.05):
z = norm.ppf(q)
num = 1.5 * norm.pdf(z)**2.
den = 2. * z**2. + 1.
h = n**(-1. / 3) * norm.ppf(1. - alpha / 2.)**(2./3) * (num / den)**(1./3)
return h
def bofinger(n, q):
num = 9. / 2 * norm.pdf(2 * norm.ppf(q))**4
den = (2 * norm.ppf(q)**2 + 1)**2
h = n**(-1. / 5) * (num / den)**(1. / 5)
return h
def chamberlain(n, q, alpha=.05):
return norm.ppf(1 - alpha / 2) * np.sqrt(q*(1 - q) / n)
# fmt: on
def _fit_coefs(X, y, q, eps):
"""In cases of convergence issues, we increase the duality gap
tolerance.
"""
coefs = fit_coefs(X, y, q, eps)
while any(np.isnan(coefs)):
eps *= 5.0
coefs = fit_coefs(X, y, q, eps)
return coefs
| <filename>src/pyqreg/quantile_regression.py<gh_stars>0
import numpy as np
import scipy.stats as stats
from numpy.linalg import pinv
from scipy.stats import norm, t
from .c.blas_lapack import lapack_cholesky_inv
from .c.cluster_cov import psi_function
from .c.fit_coefs import fit_coefs
from .c.matrix_opaccum import matrix_opaccum
from .c.stats import invnormal, normalden
from .utils import rng_generator
class QuantReg:
    """Quantile regression solved by interior point methods.

    Parameters
    ----------
    y : ndarray
        Response vector.
    X : ndarray
        Design matrix. Both arrays are coerced to Fortran-ordered double
        arrays, as required by the compiled (Cython/LAPACK) solvers.
    """

    def __init__(self, y, X):
        # The compiled routines require column-major (Fortran-ordered) data.
        if not X.flags["F_CONTIGUOUS"]:
            X = np.array(X, np.double, copy=False, order="F", ndmin=1)
        if not y.flags["F_CONTIGUOUS"]:
            y = np.array(y, np.double, copy=False, order="F", ndmin=1)
        self.y = y
        self.X = X

    def fit(
        self,
        q,
        cov_type="robust",
        fit_method=None,
        seed=None,
        eps=1e-6,
        Mm_factor=0.8,
        max_bad_fixup=3,
        kappa_eps=1e-6,
        kernel="epa",
        bandwidth="hsheather",
        cov_kwds=None,
    ):
        """Solve by interior point method (Mehrotra's predictor corrector
        algorithm). If n >= 100,000, it will use preprocessing step following
        Portnoy and Koenker (1997).
        Parameters
        ----------
        q : double
            Quantile value strictly between 0 and 1
        fit_method : str or None. Default None.
            Coefficient estimation method.
            - None : uses ipm if n < 100000, else, preproc-ipm.
            - ipm : interior point method.
            - preproc-ipm : interior point method with preprocessing.
        cov_type : str. Default 'robust'.
            Type of covariance estimator to use. Available types are
            ``iid`` for iid errors, ``robust`` for heteroskedastic errors,
            and ``cluster`` for clustered errors.
        seed : int or None
            Random seed to use if preproc-ipm is used for subsampling.
        kernel : str, kernel to use in the kernel density estimation for the
            asymptotic covariance matrix:
            - epa: Epanechnikov
            - cos: Cosine
            - gau: Gaussian
            - par: Parzene
        bandwidth : str, Bandwidth selection method in kernel density
            estimation for asymptotic covariance estimate (full
            references in QuantReg docstring):
            - hsheather: Hall-Sheather (1988)
            - bofinger: Bofinger (1975)
            - chamberlain: Chamberlain (1994)
        cov_kwds : dict or None. Default None (treated as empty dict).
            Additional keywords used in the covariance specification.
            - groups : ndarray int type
                Integer-valued index of clusters or groups. Required if using
                the ``cluster`` cov_type.
            - kappa_type : str. Default 'silverman'.
                The scaling factor for the bandwidth. Available rule of thumbs
                type are ``silverman`` and ``median``.
        """
        # Avoid a mutable default argument; None means "no extra keywords".
        if cov_kwds is None:
            cov_kwds = {}
        n = len(self.X)
        # BUG FIX: the preprocessing arguments are now passed by keyword.
        # Previously they were passed positionally, which silently mapped
        # Mm_factor -> eps, max_bad_fixup -> Mm_factor and
        # kappa_eps -> max_bad_fixup in fit_preproc_ipm's signature.
        if fit_method is None:
            if n >= 100000:
                rng = rng_generator(seed)
                self.params = self.fit_preproc_ipm(
                    q,
                    rng,
                    eps=eps,
                    Mm_factor=Mm_factor,
                    max_bad_fixup=max_bad_fixup,
                    kappa_eps=kappa_eps,
                )
            else:
                self.params = self.fit_ipm(q, eps)
        elif fit_method == "ipm":
            self.params = self.fit_ipm(q, eps)
        elif fit_method == "preproc-ipm":
            rng = rng_generator(seed)
            self.params = self.fit_preproc_ipm(
                q,
                rng,
                eps=eps,
                Mm_factor=Mm_factor,
                max_bad_fixup=max_bad_fixup,
                kappa_eps=kappa_eps,
            )
        else:
            # Fail fast instead of hitting an AttributeError on self.params
            # in the covariance step below.
            raise ValueError(
                "fit_method must be one of None, 'ipm', 'preproc-ipm'; got {!r}".format(
                    fit_method
                )
            )
        # Estimate covariance matrix
        if cov_type == "cluster":
            if "groups" not in cov_kwds:
                raise ValueError(
                    'You must provide "groups" keyword value in cov_kwds if data is clustered'
                )
            else:
                groups = cov_kwds["groups"]
            if not np.issubdtype(groups.dtype, np.integer):
                raise TypeError(
                    "groups array must be integer type. Instead it is {}.".format(
                        groups.dtype
                    )
                )
            groups = groups.astype(np.int32)
            if "kappa_type" not in cov_kwds:
                kappa_type = "silverman"
            else:
                kappa_type = cov_kwds["kappa_type"]
            self.vcov = self.cluster_cov(groups, self.params, q, kappa_type)
            self.bse = np.sqrt(np.diag(self.vcov))
        elif cov_type == "robust":
            self.vcov = self.iid_robust_cov(
                self.params, q, kernel, bandwidth, vcov="robust"
            )
            self.bse = np.sqrt(np.diag(self.vcov))
        elif cov_type == "iid":
            self.vcov = self.iid_robust_cov(
                self.params, q, kernel, bandwidth, vcov="iid"
            )
            self.bse = np.sqrt(np.diag(self.vcov))
        else:
            cov_type_names = ["iid", "robust", "cluster"]
            raise Exception("cov_type must be one of " + ", ".join(cov_type_names))
        # Compute two-sided p-values from a Student-t with n - p dof.
        self.tvalues = self.params / self.bse
        self.pvalues = np.empty(len(self.tvalues))
        for i, z in enumerate(np.abs(self.tvalues)):
            self.pvalues[i] = (
                1 - t.cdf(x=z, loc=0, scale=1, df=n - self.X.shape[1])
            ) * 2
        self.nobs = n
        return self

    def conf_int(self, alpha=0.05):
        """Compute the confidence intervals.
        Parameters
        ----------
        alpha : float
            Two-sided significance level; defaults to 95% intervals.
        """
        self.upb = (
            self.params
            + t.ppf(q=1 - alpha / 2.0, df=self.nobs - self.X.shape[1]) * self.bse
        )
        self.lob = (
            self.params
            - t.ppf(q=1 - alpha / 2.0, df=self.nobs - self.X.shape[1]) * self.bse
        )
        # Stacked as (n_params, 2): column 0 lower bound, column 1 upper bound.
        return np.squeeze(np.dstack([self.lob, self.upb]))

    def fit_ipm(self, q, eps=1e-6):
        """Estimate coefficients using the interior point method.
        Parameters
        ----------
        q : double
            Quantile value strictly between 0 and 1
        eps : double
            Duality gap stopping criterion
        """
        coefs = _fit_coefs(self.X, self.y, q, eps)
        return coefs

    def fit_preproc_ipm(
        self, q, rng, eps=1e-6, Mm_factor=0.8, max_bad_fixup=3, kappa_eps=1e-6
    ):
        """Preprocessing phase as described in Portnoy and Koenker,
        Statistical Science, (1997) 279-300.
        Python implementation of the R code "rq.fit.pfn".
        As was cautioned, use only when the problem size is very large. The
        recommended size of n according to the original author is > 100,000.
        Parameters
        ----------
        q : double
            Quantile value strictly between 0 and 1
        rng : numpy Generator
            Source of randomness for the subsampling step.
        """
        X = self.X
        y = self.y
        n, p = X.shape
        # Initial subsample size ~ ((p + 1) n)^(2/3), per the reference.
        m = int(((p + 1) * n) ** (2 / 3))
        not_optimal = True
        while not_optimal:
            if m < n:
                s = rng.choice(n, m, replace=False)
            else:
                # Subsample grew past n: fall back to a full-data solve.
                return _fit_coefs(X, y, q, eps)
            xx = X[s]
            yy = y[s]
            xx = np.array(xx, np.double, copy=False, order="F", ndmin=1)
            yy = np.array(yy, np.double, copy=False, order="F", ndmin=1)
            first_coefs = _fit_coefs(xx, yy, q, eps)
            xxinv = pinv(xx.T @ xx)
            band = np.sqrt(((X @ xxinv) ** 2) @ np.ones(p))
            r = y - X @ first_coefs
            M = Mm_factor * m
            lo_q = max(1 / n, q - M / (2 * n))
            hi_q = min(q + M / (2 * n), (n - 1) / n)
            kappa = np.quantile(r / np.maximum(kappa_eps, band), [lo_q, hi_q])
            # Observations confidently below (sl) / above (su) the fit are
            # "globbed" into two aggregate pseudo-observations.
            sl = r < band * kappa[0]
            su = r > band * kappa[1]
            bad_fixup = 0
            while not_optimal & (bad_fixup < max_bad_fixup):
                xx = X[~su & ~sl]
                yy = y[~su & ~sl]
                if any(sl):
                    glob_x = X[sl].T @ np.ones(np.sum(sl))
                    # Notes:
                    # 1. The resulting matrix is transposed one more time because np.ones is 1 dimensional.
                    # 2. Summing data with same residual signs will not change the residual sign of the summed.
                    glob_y = np.sum(y[sl])
                    xx = np.vstack([xx, glob_x])
                    yy = np.r_[yy, glob_y]
                if any(su):
                    ghib_x = X[su].T @ np.ones(np.sum(su))
                    ghib_y = np.sum(y[su])
                    xx = np.vstack([xx, ghib_x])
                    yy = np.r_[yy, ghib_y]
                xx = np.array(xx, np.double, copy=False, order="F", ndmin=1)
                yy = np.array(yy, np.double, copy=False, order="F", ndmin=1)
                coefs = _fit_coefs(xx, yy, q, eps)
                r = y - X @ coefs
                # Points whose residual sign contradicts their glob assignment.
                su_bad = (r < 0) & su
                sl_bad = (r > 0) & sl
                if any(np.r_[su_bad, sl_bad]):
                    if np.sum(sl_bad) + np.sum(su_bad) > 0.1 * M:
                        # Too many misclassified points: restart with a
                        # larger subsample.
                        m = 2 * m
                        break
                    su = su & ~su_bad
                    sl = sl & ~sl_bad
                    bad_fixup = bad_fixup + 1
                else:
                    not_optimal = False
        return coefs

    def cluster_cov(self, groups, beta, q, kappa_type="silverman"):
        """Covariance matrix estimator as proposed by Parente and Santos
        Silva (2013). Translated from Stata code of qreg2.

        WARNING: reorders ``self.X`` and ``self.y`` in place (stable sort by
        group) so that matrix_opaccum sees contiguous groups.
        Parameters
        ----------
        groups : ndarray
            The group index array.
        beta : ndarray
            The estimated parameter values.
        q : double
            The quantile strictly between 0 and 1.
        kappa_type : str. Default 'silverman'.
            The scaling factor for the bandwidth. Available rule of thumbs
            type are ``silverman`` and ``median``.
        """
        theta = q
        n = len(self.X)
        sort_args = groups.argsort(kind="mergesort")
        self.X = self.X[sort_args]
        self.y = self.y[sort_args]
        groups = groups[sort_args]
        self.X = np.array(self.X, np.double, copy=False, order="F", ndmin=1)
        self.y = np.array(self.y, np.double, copy=False, order="F", ndmin=1)
        groups = np.array(groups, np.int32, copy=False, order="F", ndmin=1)
        G = len(np.unique(groups))
        # Compute residuals
        resid = self.y - self.X @ beta
        # Compute A (outer score accumulation per cluster)
        # psi
        psi_resid = psi_function(resid, theta)
        A = matrix_opaccum(self.X, groups, psi_resid, G)
        # Compute B
        # fmt: off
        # h_nG: Hall-Sheather-type bandwidth at alpha = 0.05
        h_nG = (invnormal(0.975)**(2/3)) * \
            ((1.5 * ((normalden(invnormal(theta)))**2) / (2 * ((invnormal(theta))**2) + 1))**(1/3)) * \
            (n)**(-1/3)
        # fmt: on
        # kappa: robust scale estimate of the residuals
        if kappa_type == "median":
            k = np.median(np.abs(resid))
        elif kappa_type == "silverman":
            k = min(
                np.std(resid),
                (np.percentile(resid, 75) - np.percentile(resid, 25)) / 1.34,
            )
        else:
            raise ValueError(
                "Incorrect kappa_type {}. Please choose between median and silverman".format(
                    kappa_type
                )
            )
        # c^_G
        chat_G = k * (invnormal(theta + h_nG) - invnormal(theta - h_nG))
        # B weights: uniform-kernel density indicator scaled by bandwidth
        dens = np.sqrt((np.abs(resid) < chat_G).astype(np.float64) / (2 * chat_G))
        _groups = np.arange(len(groups)).astype(np.int32)
        B = matrix_opaccum(self.X, _groups, dens, n)
        # Compute Binv A Binv (sandwich); lapack_cholesky_inv inverts in place
        B = np.array(B, np.double, copy=False, order="F", ndmin=1)
        lapack_cholesky_inv(B)
        return B @ A @ B

    def iid_robust_cov(self, beta, q, kernel, bandwidth, vcov="robust"):
        """Covariance matrix estimation for iid data as written in the statsmodels:
        https://www.statsmodels.org/stable/_modules/statsmodels/regression/quantile_regression.html#QuantReg
        Parameters
        ----------
        kernel : str, kernel to use in the kernel density estimation for the
            asymptotic covariance matrix:
            - epa: Epanechnikov
            - cos: Cosine
            - gau: Gaussian
            - par: Parzene
        bandwidth : str, Bandwidth selection method in kernel density
            estimation for asymptotic covariance estimate (full
            references in QuantReg docstring):
            - hsheather: Hall-Sheather (1988)
            - bofinger: Bofinger (1975)
            - chamberlain: Chamberlain (1994)
        """
        kern_names = ["biw", "cos", "epa", "gau", "par"]
        if kernel not in kern_names:
            raise Exception("kernel must be one of " + ", ".join(kern_names))
        else:
            kernel = kernels[kernel]
        if bandwidth == "hsheather":
            bandwidth = hall_sheather
        elif bandwidth == "bofinger":
            bandwidth = bofinger
        elif bandwidth == "chamberlain":
            bandwidth = chamberlain
        else:
            raise Exception(
                "bandwidth must be in 'hsheather', 'bofinger', 'chamberlain'"
            )
        # Compute residuals
        resid = self.y - self.X @ beta
        nobs = len(self.X)
        iqre = stats.scoreatpercentile(resid, 75) - stats.scoreatpercentile(resid, 25)
        # Bandwidth in quantile space, then converted to residual scale.
        h = bandwidth(nobs, q)
        h = min(np.std(self.y), iqre / 1.34) * (norm.ppf(q + h) - norm.ppf(q - h))
        # Kernel density estimate of the residual density at zero.
        fhat0 = 1.0 / (nobs * h) * np.sum(kernel(resid / h))
        if vcov == "robust":
            d = np.where(resid > 0, (q / fhat0) ** 2, ((1 - q) / fhat0) ** 2)
            xtxi = pinv(np.dot(self.X.T, self.X))
            xtdx = np.dot(self.X.T * d[np.newaxis, :], self.X)
            vcov = xtxi @ xtdx @ xtxi
        elif vcov == "iid":
            vcov = (1.0 / fhat0) ** 2 * q * (1 - q) * pinv(np.dot(self.X.T, self.X))
        return vcov
# fmt: off
# From https://www.statsmodels.org/stable/_modules/statsmodels/regression/quantile_regression.html#QuantReg.
def _parzen(u):
z = np.where(np.abs(u) <= .5, 4./3 - 8. * u**2 + 8. * np.abs(u)**3,
8. * (1 - np.abs(u))**3 / 3.)
z[np.abs(u) > 1] = 0
return z
# Kernel weight functions used by iid_robust_cov for the kernel density
# estimate of the residual density (statsmodels naming convention).
kernels = {}
kernels['biw'] = lambda u: 15. / 16 * (1 - u**2)**2 * np.where(np.abs(u) <= 1, 1, 0)
kernels['cos'] = lambda u: np.where(np.abs(u) <= .5, 1 + np.cos(2 * np.pi * u), 0)
kernels['epa'] = lambda u: 3. / 4 * (1-u**2) * np.where(np.abs(u) <= 1, 1, 0)
# BUG FIX: 'gau' is advertised in iid_robust_cov's kern_names but was missing
# here, so kernel="gau" raised KeyError. statsmodels uses the standard normal
# pdf for the Gaussian kernel.
kernels['gau'] = lambda u: norm.pdf(u)
kernels['par'] = _parzen
def hall_sheather(n, q, alpha=.05):
    """Hall-Sheather (1988) bandwidth rule for quantile sparsity estimation."""
    quantile_z = norm.ppf(q)
    density_ratio = 1.5 * norm.pdf(quantile_z)**2. / (2. * quantile_z**2. + 1.)
    critical = norm.ppf(1. - alpha / 2.)
    return n**(-1. / 3) * critical**(2./3) * density_ratio**(1./3)
def bofinger(n, q):
    """Bofinger (1975) bandwidth rule for quantile sparsity estimation."""
    z = norm.ppf(q)
    ratio = (9. / 2 * norm.pdf(2 * z)**4) / ((2 * z**2 + 1)**2)
    return n**(-1. / 5) * ratio**(1. / 5)
def chamberlain(n, q, alpha=.05):
    """Chamberlain (1994) bandwidth rule for quantile sparsity estimation."""
    half_width = np.sqrt(q * (1 - q) / n)
    return norm.ppf(1 - alpha / 2) * half_width
# fmt: on
def _fit_coefs(X, y, q, eps, max_retries=50):
    """Fit quantile-regression coefficients, relaxing the duality-gap
    tolerance when the interior point solver fails to converge.

    Parameters
    ----------
    X, y : ndarray
        Fortran-ordered design matrix and response vector.
    q : double
        Quantile strictly between 0 and 1.
    eps : double
        Initial duality gap stopping criterion; multiplied by 5 after each
        failed attempt (NaN coefficients signal non-convergence).
    max_retries : int. Default 50.
        Upper bound on relaxation attempts. BUG FIX: the original loop
        retried forever, hanging if the solver never converged.

    Raises
    ------
    RuntimeError
        If the solver still returns NaNs after ``max_retries`` relaxations.
    """
    coefs = fit_coefs(X, y, q, eps)
    retries = 0
    while any(np.isnan(coefs)):
        if retries >= max_retries:
            raise RuntimeError(
                "interior point solver failed to converge after "
                "{} duality-gap relaxations (eps={})".format(max_retries, eps)
            )
        eps *= 5.0
        coefs = fit_coefs(X, y, q, eps)
        retries += 1
    return coefs
| en | 0.622958 | Solve by interior point method (Mehrotra's predictor corrector algorithm). If n >= 100,000, it will use preprocessing step following Portnoy and Koenker (1997). Parameters ---------- q : double Quantile value strictly between 0 and 1 fit_method : str or None. Default None. Coefficient estimation method. - None : uses ipm if n < 100000, else, preproc-ipm. - ipm : interior point method. - preproc-ipm : interior point method with preprocessing. cov_type : str. Default 'robust'. Type of covariance estimator to use. Available types are ``iid`` for iid errors, ``robust`` for heteroskedastic errors, and ``cluster`` for clustered errors. seed : int or None Random seed to use if preproc-ipm is used for subsampling. kernel : str, kernel to use in the kernel density estimation for the asymptotic covariance matrix: - epa: Epanechnikov - cos: Cosine - gau: Gaussian - par: Parzene bandwidth : str, Bandwidth selection method in kernel density estimation for asymptotic covariance estimate (full references in QuantReg docstring): - hsheather: Hall-Sheather (1988) - bofinger: Bofinger (1975) - chamberlain: Chamberlain (1994) cov_kwds : dict Additional keywords used in the covariance specification. - groups : ndarray int type Integer-valued index of clusters or groups. Required if using the ``cluster`` cov_type. - kappa_type : str. Default 'silverman'. The scaling factor for the bandwidth. Available rule of thumbs type are ``silverman`` and ``median``. # Estimate covariance matrix # Compute two-sided p-values. Compute the confidence intervals. Parameters ---------- alpha : float Estimate coefficients using the interior point method. Paramters --------- q : double Quantile value strictly between 0 and 1 eps : double Duality gap stopping criterion Preprocessing phase as described in Portnoy and Koenker, Statistical Science, (1997) 279-300. Python implementation of the R code "rq.fit.pfn". As was cautioned, use only when the problem size is very large. 
The recommended size of n according to the original author is > 100,000. Parameters ---------- # Notes: # 1. The resulting matrix is transposed one more time because np.ones is 1 dimensional. # 2. Summing data with same residual signs will not change the residual sign of the summed. Covariance matrix estimator as proposed by <NAME> Silva (2013). Translated from Stata code of qreg2. Parameters ---------- groups : ndarray The group index array. beta : ndarray The estimated parameter values. q : double The quantile strictly between 0 and 1. kappa_type : str. Default 'silverman'. The scaling factor for the bandwidth. Available rule of thumbs type are ``silverman`` and ``median``. # Compute residuals # Compute A # psi # Compute B # fmt: off # h_nG # fmt: on # kappa # c^_G # B weights # Compute Binv A Binv Covariance matrix estimation for iid data as written in the statsmodels: https://www.statsmodels.org/stable/_modules/statsmodels/regression/quantile_regression.html#QuantReg Parameters ---------- kernel : str, kernel to use in the kernel density estimation for the asymptotic covariance matrix: - epa: Epanechnikov - cos: Cosine - gau: Gaussian - par: Parzene bandwidth : str, Bandwidth selection method in kernel density estimation for asymptotic covariance estimate (full references in QuantReg docstring): - hsheather: Hall-Sheather (1988) - bofinger: Bofinger (1975) - chamberlain: Chamberlain (1994) # Compute residuals # fmt: off # From https://www.statsmodels.org/stable/_modules/statsmodels/regression/quantile_regression.html#QuantReg. # fmt: on In cases of convergence issues, we increase the duality gap tolerance. | 2.217616 | 2 |
pyinstaller.py | xueqing-chen/pyinstaller | 0 | 6616594 | <filename>pyinstaller.py
from PyInstaller.__main__ import run
if __name__ == '__main__':
    # Bundle TL.py into a single-file ('-F') windowed/no-console ('-w')
    # executable with a custom icon.
    # NOTE(review): --icon usually expects a .ico (Windows) or .icns (macOS)
    # file -- confirm that a .png works on the target platform.
    opts = ['TL.py', '-F', '-w', '--icon=ico.png']
    run(opts)
from PyInstaller.__main__ import run
if __name__ == '__main__':
opts = ['TL.py', '-F', '-w', '--icon=ico.png']
run(opts) | none | 1 | 1.763093 | 2 | |
py/cidoc_crm_types/properties/p19_was_intended_use_of.py | minorg/cidoc-crm-types | 0 | 6616595 | <reponame>minorg/cidoc-crm-types
from dataclasses import dataclass
@dataclass
class P19WasIntendedUseOf:
    """
    Scope note:
    This property relates an instance of E7 Activity with instances of E71 Human-Made Thing, created specifically for use in the activity.
    This is distinct from the intended use of an item in some general type of activity such as the book of common prayer which was intended for use in Church of England services (see P101 had as general use (was use of)).
    Examples:
    - <NAME> wedding dress (E71) was made for Wedding of <NAME> and <NAME> (E7) mode of use To Be Worn (E55)
    In First Order Logic:
    P19(x,y) ⊃ E7(x)
    P19(x,y) ⊃ E71(y)
    P19(x,y,z) ⊃ [P19(x,y) ∧ E55(z)]
    """
    # Canonical identifier of this CIDOC CRM property in the Erlangen CRM
    # (OWL) namespace; bindings reference the property by this class-level URI.
    URI = "http://erlangen-crm.org/current/P19_was_intended_use_of"
| from dataclasses import dataclass
@dataclass
class P19WasIntendedUseOf:
"""
Scope note:
This property relates an instance of E7 Activity with instances of E71 Human-Made Thing, created specifically for use in the activity.
This is distinct from the intended use of an item in some general type of activity such as the book of common prayer which was intended for use in Church of England services (see P101 had as general use (was use of)).
Examples:
- <NAME> wedding dress (E71) was made for Wedding of <NAME> and <NAME> (E7) mode of use To Be Worn (E55)
In First Order Logic:
P19(x,y) ⊃ E7(x)
P19(x,y) ⊃ E71(y)
P19(x,y,z) ⊃ [P19(x,y) ∧ E55(z)]
"""
URI = "http://erlangen-crm.org/current/P19_was_intended_use_of" | en | 0.940049 | Scope note: This property relates an instance of E7 Activity with instances of E71 Human-Made Thing, created specifically for use in the activity. This is distinct from the intended use of an item in some general type of activity such as the book of common prayer which was intended for use in Church of England services (see P101 had as general use (was use of)). Examples: - <NAME> wedding dress (E71) was made for Wedding of <NAME> and <NAME> (E7) mode of use To Be Worn (E55) In First Order Logic: P19(x,y) ⊃ E7(x) P19(x,y) ⊃ E71(y) P19(x,y,z) ⊃ [P19(x,y) ∧ E55(z)] | 2.402451 | 2 |
ch_4/f2c_qa.py | ProhardONE/python_primer | 51 | 6616596 | # Exercise 4.1
# Author: <NAME>
# Convert a user-supplied Fahrenheit temperature to Celsius.
# Python 2 script: uses raw_input and the print statement.
F = raw_input("Enter a temperature in Fahrenheit:\n")
F = float(F)
# C = 5/9 * (F - 32); writing "5 / 9." forces float division under Python 2,
# where "5 / 9" would be integer division (0).
C = 5 / 9. * (F - 32)
print "%g Fahrenheit = %g Celsius" % (F, C)
"""
Sample run:
python f2c_qa.py
Enter a temperature in Fahrenheit:
243
243 Fahrenheit = 117.222 Celsius
"""
| # Exercise 4.1
# Author: <NAME>
F = raw_input("Enter a temperature in Fahrenheit:\n")
F = float(F)
C = 5 / 9. * (F - 32)
print "%g Fahrenheit = %g Celsius" % (F, C)
"""
Sample run:
python f2c_qa.py
Enter a temperature in Fahrenheit:
243
243 Fahrenheit = 117.222 Celsius
"""
| en | 0.333534 | # Exercise 4.1 # Author: <NAME> Sample run: python f2c_qa.py Enter a temperature in Fahrenheit: 243 243 Fahrenheit = 117.222 Celsius | 3.681527 | 4 |
scripts/misc/013_robustness.py | agalitsyna/sc_dros | 1 | 6616597 | import glob
from basic_utils import *
import numpy as np
import pandas as pd
experiment_ids = 'Cell1 Cell2 Cell3 Cell4 Cell5 Cell6 Cell7 Cell8 Cell9 Cell10 Cell11 Cell12 Cell13 Cell14 Cell15 Cell16 Cell17 Cell18 Cell19 Cell20 Cell21 Cell22 Cell23 Cell24 Cell25 Cell26 Cell27 Cell28 Cell29 Cell30 Cell31 Cell32 Cell33 Cell34 Cell35 Cell36 Cell37 Cell38 Cell39 Cell40 Cell41 Cell42 Cell43 Cell44 Cell45 Cell46 Cell47 Cell48 Cell49 Cell50 Cell51 Cell52 Cell53 Cell54 Cell55 Cell56 Cell57 Cell58 Cell59 Cell60 Cell61 Cell62 Cell63 Cell64 Cell65 Cell66 Cell67 Cell68 Cell69 Cell70 Cell71 Cell72 Cell73 Cell74 Cell75 Cell76 Cell77 Cell78 Cell79 Cell80'.split()
# Preliminary work, selection of ids:
for exp in experiment_ids:
    # All R1 FASTQ lanes for this cell.
    fasta_list = glob.glob('../data/FASTQ/{}_*R1*.fastq'.format(exp))
    output = "../data/IDS/{}.ids.txt".format(exp)
    # Build one shell pipeline: awk 'NR % 4 == 1' keeps every FASTQ header
    # line (1 of each 4-line record); gawk then strips the leading '@' and
    # anything after the first space, leaving bare read IDs.
    # NOTE(review): uses bash process substitution "<(...)", so run_command
    # must invoke bash (not sh) -- confirm.
    command = 'cat'
    for f in fasta_list:
        command += " <(awk 'NR % 4 == 1' {})".format(f)
    command += " | gawk '{{match($0, \"@(.+) \", a)}} {{print a[1]}}' > {}".format(output)
    run_command(command)
def create_selections(len_initial, start, end, step, add=False, replace=False):
    """Build a list of random index selections of varying size.

    Selection sizes run over ``range(start, end + step, step)``; the full
    index range ``arange(len_initial)`` is always appended last. With
    ``add=True`` the sizes are visited largest first and every selection is
    drawn from the previous one, so the selections form a nested chain.
    """
    assert (end - start) / step >= 1
    pool = np.arange(len_initial)
    selections = []
    sizes = range(start, end + step, step)
    if add:
        for size in sizes[::-1]:
            picked = np.random.choice(pool, size, replace=replace)
            pool = picked.copy()
            selections.append(picked)
    else:
        for size in sizes:
            selections.append(np.random.choice(pool, size, replace=replace))
    selections.append(np.arange(len_initial))
    return selections
from multiprocessing import Pool
import os
nthreads = 10
niter = 10
for exp in experiment_ids:
print(exp)
lst_total_add = []
lst_total_noadd = []
pairsam_dct = {}
idxfa_dct = {}
file_fa = "../data/IDS/{}.ids.txt".format(exp)
idxfa_dct[exp] = np.loadtxt(file_fa, dtype='S64')
files_pairsam = glob.glob('../data/PAIR/{}_*.pairsam.JJ'.format(exp))
exp_list = [x.split('/')[-1].split('.')[0] for x in files_pairsam]
pairsam_dct[exp] = read_pairsams(files_pairsam, exp_list, exp)
len_idxfa = len(idxfa_dct[exp])
if len_idxfa > 5000000:
step = 1000000
else:
step = 100000
lst_add = [(exp, x, n)
for n in range(niter)
for x in create_selections(len_idxfa, step, step*(len_idxfa//step), step, add=True, replace=False)
]
lst_total_add += lst_add
lst_noadd = [(exp, x, n)
for n in range(niter)
for x in create_selections(len_idxfa, step, step*(len_idxfa//step), step, add=False, replace=False)
]
lst_total_noadd += lst_noadd
    def run_filter(args):
        """
        Run selection of fasta indices specified by idx_selected list,
        querying of df_pairsam and filtering of unique contacts.
        Global parameters used (query by exp): idxfa_dct and pairsam_dct
        :param exp: experiment label
        :param idx_selected: numpy list (int) of selected numbers of indexes in fa
        :param niter: metainfo
        :return: stats
        """
        exp, idx_selected, niter = args
        idxfa = idxfa_dct[exp]
        # Copy so the cached full pairsam table is not mutated across calls.
        df = pairsam_dct[exp].copy()
        idxpairsam = df.readID.values.astype('S64')
        df.index = idxpairsam
        # Map the sampled FASTQ read IDs onto reads present in the pairsam
        # table (i.e. reads that survived mapping/parsing).
        idxfa_selected = idxfa[idx_selected].astype('S64')
        idxpairsam_selected = np.intersect1d(idxfa_selected, idxpairsam)
        df_selected = df.loc[idxpairsam_selected,:].reset_index(drop=True)
        df_filtered, stats = filter_pair_df(df_selected)
        # Annotate filtering stats with sampling metadata for the output table.
        stats['exp'] = exp
        stats['niter'] = niter
        stats['len_idxfa'] = len(idxfa)
        stats['len_idxfa_selected'] = len(idxfa_selected)
        stats['len_idxpairsam'] = len(idxpairsam)
        stats['len_idxpairsam_selected'] = len(idxpairsam_selected)
        return stats
print(len(idxfa_dct[exp]))
if len(idxfa_dct[exp])>30*1e6:
res = []
for i in lst_total_add:
res.append(run_filter(i))
res = pd.DataFrame(res)
else:
# Parallel analogue of run_filter([exp, idx_selected, 0])
p = Pool(nthreads)
res = pd.DataFrame(p.map(run_filter, lst_total_add))
if not os.path.isfile('../data/TABLES/robustness_analysis_add.txt'):
res.to_csv('../data/TABLES/robustness_analysis_add.txt')
else:
res.to_csv('../data/TABLES/robustness_analysis_add.txt', mode='a', header=False)
if len(idxfa_dct[exp])>30*1e6:
res = []
for i in lst_total_noadd:
res.append(run_filter(i))
res = pd.DataFrame(res)
else:
p = Pool(nthreads)
res = pd.DataFrame(p.map(run_filter, lst_total_noadd))
if not os.path.isfile('../data/TABLES/robustness_analysis_noadd.txt'):
res.to_csv('../data/TABLES/robustness_analysis_noadd.txt')
else:
res.to_csv('../data/TABLES/robustness_analysis_noadd.txt', mode='a', header=False)
del lst_total_add
del lst_total_noadd
del pairsam_dct
del idxfa_dct
| import glob
from basic_utils import *
import numpy as np
import pandas as pd
experiment_ids = 'Cell1 Cell2 Cell3 Cell4 Cell5 Cell6 Cell7 Cell8 Cell9 Cell10 Cell11 Cell12 Cell13 Cell14 Cell15 Cell16 Cell17 Cell18 Cell19 Cell20 Cell21 Cell22 Cell23 Cell24 Cell25 Cell26 Cell27 Cell28 Cell29 Cell30 Cell31 Cell32 Cell33 Cell34 Cell35 Cell36 Cell37 Cell38 Cell39 Cell40 Cell41 Cell42 Cell43 Cell44 Cell45 Cell46 Cell47 Cell48 Cell49 Cell50 Cell51 Cell52 Cell53 Cell54 Cell55 Cell56 Cell57 Cell58 Cell59 Cell60 Cell61 Cell62 Cell63 Cell64 Cell65 Cell66 Cell67 Cell68 Cell69 Cell70 Cell71 Cell72 Cell73 Cell74 Cell75 Cell76 Cell77 Cell78 Cell79 Cell80'.split()
# Preliminary work, selection of ids:
for exp in experiment_ids:
fasta_list = glob.glob('../data/FASTQ/{}_*R1*.fastq'.format(exp))
output = "../data/IDS/{}.ids.txt".format(exp)
command = 'cat'
for f in fasta_list:
command += " <(awk 'NR % 4 == 1' {})".format(f)
command += " | gawk '{{match($0, \"@(.+) \", a)}} {{print a[1]}}' > {}".format(output)
run_command(command)
def create_selections(len_initial, start, end, step, add=False, replace=False):
"""
Creation of random selections of indices
"""
idx_full = np.arange(len_initial)
toreturn = []
assert (end-start)/step>=1
if add:
for i in range(start, end + step, step)[::-1]:
lst = np.random.choice(idx_full, i, replace=replace)
idx_full = lst.copy() #np.setdiff1d(idx_full, lst)
toreturn.append(lst)
else:
for i in range(start, end+step, step):
lst = np.random.choice(idx_full, i, replace=replace)
toreturn.append(lst)
toreturn.append(np.arange(len_initial))
return(toreturn)
from multiprocessing import Pool
import os
nthreads = 10
niter = 10
for exp in experiment_ids:
print(exp)
lst_total_add = []
lst_total_noadd = []
pairsam_dct = {}
idxfa_dct = {}
file_fa = "../data/IDS/{}.ids.txt".format(exp)
idxfa_dct[exp] = np.loadtxt(file_fa, dtype='S64')
files_pairsam = glob.glob('../data/PAIR/{}_*.pairsam.JJ'.format(exp))
exp_list = [x.split('/')[-1].split('.')[0] for x in files_pairsam]
pairsam_dct[exp] = read_pairsams(files_pairsam, exp_list, exp)
len_idxfa = len(idxfa_dct[exp])
if len_idxfa > 5000000:
step = 1000000
else:
step = 100000
lst_add = [(exp, x, n)
for n in range(niter)
for x in create_selections(len_idxfa, step, step*(len_idxfa//step), step, add=True, replace=False)
]
lst_total_add += lst_add
lst_noadd = [(exp, x, n)
for n in range(niter)
for x in create_selections(len_idxfa, step, step*(len_idxfa//step), step, add=False, replace=False)
]
lst_total_noadd += lst_noadd
def run_filter(args):
"""
Run selection of fasta indices specified by idx_selected list,
querying of df_pairsam and filtering of unique contacts.
Global parameters used (query by exp): idxfa_dct and pairsam_dct
:param exp: experiment label
:param idx_selected: numpy list (int) of selected numbers of indexes in fa
:param niter: metainfo
:return: stats
"""
exp, idx_selected, niter = args
idxfa = idxfa_dct[exp]
df = pairsam_dct[exp].copy()
idxpairsam = df.readID.values.astype('S64')
df.index = idxpairsam
idxfa_selected = idxfa[idx_selected].astype('S64')
idxpairsam_selected = np.intersect1d(idxfa_selected, idxpairsam)
df_selected = df.loc[idxpairsam_selected,:].reset_index(drop=True)
df_filtered, stats = filter_pair_df(df_selected)
stats['exp'] = exp
stats['niter'] = niter
stats['len_idxfa'] = len(idxfa)
stats['len_idxfa_selected'] = len(idxfa_selected)
stats['len_idxpairsam'] = len(idxpairsam)
stats['len_idxpairsam_selected'] = len(idxpairsam_selected)
return stats
print(len(idxfa_dct[exp]))
if len(idxfa_dct[exp])>30*1e6:
res = []
for i in lst_total_add:
res.append(run_filter(i))
res = pd.DataFrame(res)
else:
# Parallel analogue of run_filter([exp, idx_selected, 0])
p = Pool(nthreads)
res = pd.DataFrame(p.map(run_filter, lst_total_add))
if not os.path.isfile('../data/TABLES/robustness_analysis_add.txt'):
res.to_csv('../data/TABLES/robustness_analysis_add.txt')
else:
res.to_csv('../data/TABLES/robustness_analysis_add.txt', mode='a', header=False)
if len(idxfa_dct[exp])>30*1e6:
res = []
for i in lst_total_noadd:
res.append(run_filter(i))
res = pd.DataFrame(res)
else:
p = Pool(nthreads)
res = pd.DataFrame(p.map(run_filter, lst_total_noadd))
if not os.path.isfile('../data/TABLES/robustness_analysis_noadd.txt'):
res.to_csv('../data/TABLES/robustness_analysis_noadd.txt')
else:
res.to_csv('../data/TABLES/robustness_analysis_noadd.txt', mode='a', header=False)
del lst_total_add
del lst_total_noadd
del pairsam_dct
del idxfa_dct
| en | 0.663551 | # Preliminary work, selection of ids: Creation of random selections of indices #np.setdiff1d(idx_full, lst) Run selection of fasta indices specified by idx_selected list, querying of df_pairsam and filtering of unique contacts. Global parameters used (query by exp): idxfa_dct and pairsam_dct :param exp: experiment label :param idx_selected: numpy list (int) of selected numbers of indexes in fa :param niter: metainfo :return: stats # Parallel analogue of run_filter([exp, idx_selected, 0]) | 2.325485 | 2 |
example/models.py | reimibeta/django-datetime-utils | 0 | 6616598 | from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django_datetime.datetime import datetime
# from django.utils import timezone
class Example(models.Model):
    """Demo model exercising the django_datetime helpers."""

    # BUG FIX: pass the callables themselves, not their results. The original
    # code used default=datetime.dnow() / datetime.dtnow(), which evaluates
    # ONCE at class-definition (server start) time, so every new row got the
    # same stale timestamp. Django calls a callable default per object.
    # NOTE(review): assumes dnow()/dtnow() with no args return date/datetime
    # values -- confirm against django_datetime.datetime.
    date = models.DateField(default=datetime.dnow)
    date_update = models.DateField(blank=True, null=True)
    datetime = models.DateTimeField(default=datetime.dtnow)
    datetime_update = models.DateTimeField(blank=True, null=True)

    def __str__(self):
        return "Date: {}".format(self.date)
@receiver(pre_save, sender=Example)
def update(sender, instance, **kwargs):
    """Stamp ``date_update`` on every Example instance just before it is saved."""
    if instance is not None:
        instance.date_update = datetime.dnow(str=True)
| from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django_datetime.datetime import datetime
# from django.utils import timezone
class Example(models.Model):
    """Demo model exercising the django_datetime helper defaults."""

    # BUG FIX: pass the callables themselves, not their results.  Calling
    # dnow()/dtnow() here evaluates once at import time, so every new row
    # would share the server-start date instead of the creation date.
    date = models.DateField(default=datetime.dnow)
    date_update = models.DateField(blank=True, null=True)
    # NOTE: this field name shadows the imported ``datetime`` helper inside
    # the class body; kept as-is for compatibility with existing migrations.
    datetime = models.DateTimeField(default=datetime.dtnow)
    datetime_update = models.DateTimeField(blank=True, null=True)

    def __str__(self):
        return "Date: {}".format(self.date)
@receiver(pre_save, sender=Example)
def update(sender, instance, **kwargs):
    """Stamp ``date_update`` with the current date just before saving."""
    if instance is not None:
        instance.date_update = datetime.dnow(str=True)
| en | 0.274703 | # from django.utils import timezone # pass # instance.datetime_update = datetime.dtnow(str=True) | 2.381702 | 2 |
exhibitionist/providers/websocket/handlers.py | kentfrazier/Exhibitionist | 2 | 6616599 | <reponame>kentfrazier/Exhibitionist<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
import json
import six
from exhibitionist.isubscriber import ISubscriber
from tornado import websocket
from exhibitionist.decorators import http_handler,GET_REG_OBJ_ATTR
import exhibitionist.log
from exhibitionist.toolbox import WSMsg, ExhibitionistRequestHandler
logger = exhibitionist.log.getLogger(__name__)
context = None # dummy symbol
@http_handler(r'/ws$')
class WebSocketEvents(ISubscriber, ExhibitionistRequestHandler, websocket.WebSocketHandler,):
    """Websockets events handler

    Messages going over the websocket from/to the client must be a dict with mandatory
    fields "msg_type" and "payload" fields.
    Defines msg_type(s): "PUB","SUB","ACK","NAK"

    Examples Messages
    dict(msg_type="SUB",channel="ch") - client subscribe request for channel "ch"
    dict(msg_type="PUB",payload="p",channel="ch") -
         clients publish payload "p" to channel "ch"
    dict(msg_type="ACK",payload={message}) - reply from server indicating
         that previous request (PUB or SUB) was successful
    dict(msg_type="NAK",payload={message}) - reply from server indicating
         that previous request (PUB or SUB) failed
    """

    def __init__(self, *args, **kwds):
        super(WebSocketEvents, self).__init__(*args, **kwds)
        self.canary = object()

    # ISubscriber interface
    def notify(self, channel, payload):
        """Forward a pubsub message to the connected client."""
        logger.debug('sending to websocket ' + str(payload))
        self.write_message(payload)  # WebSocketHandler method

    def open(self, objid=None):
        logger.debug('WebSocket opened')

    def on_message(self, message):
        """Parse an incoming JSON message and dispatch SUB/PUB requests."""
        logger.debug('WSMsg Received: ' + str(message))
        try:
            message = json.loads(message)
        except Exception as e:  # pragma: no cover
            self.write_message(WSMsg(msg_type="NAK", payload="Malformed payload"))
            logger.error(str(e))
            return
        try:
            message['msg_type']
        except Exception:  # pragma: no cover
            self.write_message(WSMsg(msg_type="NAK", payload="Message must have 'msg_type' field"))
            return
        try:
            if message.get("msg_type") == "SUB":
                channel = message.get("channel")
                if not channel:
                    self.write_message(WSMsg(msg_type="NAK", payload="missing/invalid channel field"))
                else:
                    context.pubsub.subscribe(self, channel)
                    self.write_message(WSMsg(msg_type="ACK", payload=message))
            elif message.get("msg_type") == "PUB":
                channel = message.get("channel")
                payload = message.get("payload")
                # BUG FIX: previously a missing channel produced a NAK but the
                # handler still fell through and published to the literal
                # channel "None".  The branches are now mutually exclusive.
                if channel is None:
                    self.write_message(WSMsg(msg_type="NAK", payload="missing channel field"))
                elif payload is None:
                    self.write_message(WSMsg(msg_type="NAK", payload="missing payload"))
                else:
                    self.write_message(WSMsg(msg_type="ACK", payload=message))
                    context.pubsub.publish(six.text_type(channel), payload, self)
            # Unknown msg_type values are silently ignored (unchanged behaviour).
        except Exception as e:  # pragma: no cover
            self.write_message(WSMsg(msg_type="NAK", payload="An error has occured"))
            logger.error(str(e))
            return

    def on_close(self):
        # unregister this client from all pubsub channels
        context.pubsub.unsubscribe(self)
        logger.debug('WebSocket closed')
| # -*- coding: utf-8 -*-
from __future__ import print_function
import logging
import json
import six
from exhibitionist.isubscriber import ISubscriber
from tornado import websocket
from exhibitionist.decorators import http_handler,GET_REG_OBJ_ATTR
import exhibitionist.log
from exhibitionist.toolbox import WSMsg, ExhibitionistRequestHandler
logger = exhibitionist.log.getLogger(__name__)
context = None # dummy symbol
@http_handler(r'/ws$')
class WebSocketEvents(ISubscriber, ExhibitionistRequestHandler, websocket.WebSocketHandler,):
    """Websockets events handler

    Messages going over the websocket from/to the client must be a dict with mandatory
    fields "msg_type" and "payload" fields.
    Defines msg_type(s): "PUB","SUB","ACK","NAK"

    Examples Messages
    dict(msg_type="SUB",channel="ch") - client subscribe request for channel "ch"
    dict(msg_type="PUB",payload="p",channel="ch") -
         clients publish payload "p" to channel "ch"
    dict(msg_type="ACK",payload={message}) - reply from server indicating
         that previous request (PUB or SUB) was successful
    dict(msg_type="NAK",payload={message}) - reply from server indicating
         that previous request (PUB or SUB) failed
    """

    def __init__(self, *args, **kwds):
        super(WebSocketEvents, self).__init__(*args, **kwds)
        self.canary = object()

    # ISubscriber interface
    def notify(self, channel, payload):
        """Forward a pubsub message to the connected client."""
        logger.debug('sending to websocket ' + str(payload))
        self.write_message(payload)  # WebSocketHandler method

    def open(self, objid=None):
        logger.debug('WebSocket opened')

    def on_message(self, message):
        """Parse an incoming JSON message and dispatch SUB/PUB requests."""
        logger.debug('WSMsg Received: ' + str(message))
        try:
            message = json.loads(message)
        except Exception as e:  # pragma: no cover
            self.write_message(WSMsg(msg_type="NAK", payload="Malformed payload"))
            logger.error(str(e))
            return
        try:
            message['msg_type']
        except Exception:  # pragma: no cover
            self.write_message(WSMsg(msg_type="NAK", payload="Message must have 'msg_type' field"))
            return
        try:
            if message.get("msg_type") == "SUB":
                channel = message.get("channel")
                if not channel:
                    self.write_message(WSMsg(msg_type="NAK", payload="missing/invalid channel field"))
                else:
                    context.pubsub.subscribe(self, channel)
                    self.write_message(WSMsg(msg_type="ACK", payload=message))
            elif message.get("msg_type") == "PUB":
                channel = message.get("channel")
                payload = message.get("payload")
                # BUG FIX: previously a missing channel produced a NAK but the
                # handler still fell through and published to the literal
                # channel "None".  The branches are now mutually exclusive.
                if channel is None:
                    self.write_message(WSMsg(msg_type="NAK", payload="missing channel field"))
                elif payload is None:
                    self.write_message(WSMsg(msg_type="NAK", payload="missing payload"))
                else:
                    self.write_message(WSMsg(msg_type="ACK", payload=message))
                    context.pubsub.publish(six.text_type(channel), payload, self)
            # Unknown msg_type values are silently ignored (unchanged behaviour).
        except Exception as e:  # pragma: no cover
            self.write_message(WSMsg(msg_type="NAK", payload="An error has occured"))
            logger.error(str(e))
            return

    def on_close(self):
        # unregister this client from all pubsub channels
        context.pubsub.unsubscribe(self)
        logger.debug('WebSocket closed')
sklearn_wrapper/modules/commons/Flattener.py | hidetomo-watanabe/analyze_for_kaggle | 3 | 6616600 | from functools import reduce
from operator import mul
class Flattener(object):
    """Sampler-style transformer that flattens each sample to 1-D.

    Mimics the imblearn resampler interface (``fit`` / ``fit_resample``).
    """

    def __init__(self):
        pass

    def fit(self, X, y=None):
        """No-op fit; present only for API compatibility."""
        return self

    def fit_resample(self, X, y=None):
        """Reshape ``X`` to (n_samples, n_features); ``y`` passes through."""
        self.fit(X, y)
        if X.ndim > 1:
            n_features = reduce(mul, X.shape[1:])
            X = X.reshape((-1, n_features))
        return X, y
| from functools import reduce
from operator import mul
class Flattener(object):
    """Sampler-style transformer that flattens each sample to 1-D.

    Mimics the imblearn resampler interface (``fit`` / ``fit_resample``).
    """

    def __init__(self):
        pass

    def fit(self, X, y=None):
        """No-op fit; present only for API compatibility."""
        return self

    def fit_resample(self, X, y=None):
        """Reshape ``X`` to (n_samples, n_features); ``y`` passes through."""
        self.fit(X, y)
        if X.ndim > 1:
            n_features = reduce(mul, X.shape[1:])
            X = X.reshape((-1, n_features))
        return X, y
| none | 1 | 3.193913 | 3 | |
pyopenproject/business/services/command/membership/delete.py | webu/pyopenproject | 5 | 6616601 | <filename>pyopenproject/business/services/command/membership/delete.py
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.delete_request import DeleteRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.membership.membership_command import MembershipCommand
class Delete(MembershipCommand):
    """Command object that deletes a membership from OpenProject."""

    def __init__(self, connection, membership):
        super().__init__(connection)
        self.membership = membership

    def execute(self):
        """Send the DELETE request; wrap transport errors in BusinessError."""
        endpoint = f"{self.CONTEXT}/{self.membership.id}"
        try:
            DeleteRequest(self.connection, endpoint).execute()
        except RequestError as re:
            raise BusinessError(f"Error deleting membership by id: {self.membership.id}") from re
| <filename>pyopenproject/business/services/command/membership/delete.py
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.delete_request import DeleteRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.membership.membership_command import MembershipCommand
class Delete(MembershipCommand):
def __init__(self, connection, membership):
super().__init__(connection)
self.membership = membership
def execute(self):
try:
DeleteRequest(self.connection, f"{self.CONTEXT}/{self.membership.id}").execute()
except RequestError as re:
raise BusinessError(f"Error deleting membership by id: {self.membership.id}") from re
| none | 1 | 2.535139 | 3 | |
examples/mmt/feature_extraction/__init__.py | knifefield/SLUDA | 3 | 6616602 | from __future__ import absolute_import
from .cnn import extract_cnn_feature
from .database import FeatureDatabase
__all__ = [
'extract_cnn_feature',
'FeatureDatabase',
]
| from __future__ import absolute_import
from .cnn import extract_cnn_feature
from .database import FeatureDatabase
__all__ = [
'extract_cnn_feature',
'FeatureDatabase',
]
| none | 1 | 1.04654 | 1 | |
code/tests/unit/conftest.py | CiscoSecurity/tr-05-serverless-jupiterone | 1 | 6616603 | <filename>code/tests/unit/conftest.py
import jwt
from app import app
from pytest import fixture
from http import HTTPStatus
from unittest.mock import MagicMock
from api.errors import INVALID_ARGUMENT
from tests.unit.payloads_for_tests import PRIVATE_KEY
@fixture(scope='session')
def client():
    """Session-wide Flask test client with the RSA signing key installed."""
    app.rsa_private_key = PRIVATE_KEY
    app.testing = True
    with app.test_client() as client:
        yield client
@fixture(scope='session')
def valid_jwt(client):
    """Factory fixture producing RS256-signed JWTs with configurable claims."""
    def _make_jwt(
            token='<PASSWORD>',
            host='some_host.logrhythm',
            jwks_host='visibility.amp.cisco.com',
            aud='http://localhost',
            kid='<KEY>',
            wrong_structure=False,
            wrong_jwks_host=False
    ):
        payload = {
            'token': token,
            'host': host,
            'jwks_host': jwks_host,
            'aud': aud,
        }
        # Optionally drop claims to simulate malformed tokens.
        if wrong_jwks_host:
            del payload['jwks_host']
        if wrong_structure:
            del payload['token']
        return jwt.encode(
            payload, client.application.rsa_private_key, algorithm='RS256',
            headers={'kid': kid}
        )
    return _make_jwt
@fixture(scope='module')
def invalid_json_expected_payload():
    """Factory building the standard invalid-argument error envelope."""
    def _make_message(message):
        error = {
            'code': INVALID_ARGUMENT,
            'message': message,
            'type': 'fatal'
        }
        return {'errors': [error]}
    return _make_message
def mock_api_response(status_code=HTTPStatus.OK, payload=None):
    """Build a MagicMock mimicking a ``requests.Response`` with a JSON body."""
    response = MagicMock()
    response.status_code = status_code
    response.ok = (status_code == HTTPStatus.OK)
    response.json = lambda: payload
    return response
@fixture(scope='module')
def ssl_error_expected_relay_response():
    """Expected relay error body for an SSL verification failure."""
    message = (
        'Unable to verify SSL certificate: '
        'Self signed certificate'
    )
    return {
        'errors': [
            {
                'code': 'unknown',
                'message': message,
                'type': 'fatal'
            }
        ]
    }
@fixture
def mock_exception_for_ssl_error():
    """MagicMock standing in for an SSL error whose
    ``reason.args[...].verify_message`` reports a self-signed certificate.

    NOTE(review): ``__getitem__()`` is configured without an index, so any
    subscript of ``reason.args`` returns the configured mock -- confirm this
    matches how the application unpacks the exception.
    """
    mock_response = MagicMock()
    mock_response.reason.args.__getitem__().verify_message = 'Self signed' \
                                                            ' certificate'
    return mock_response
@fixture(scope='module')
def connection_error_expected_relay_response():
    """Expected relay error body when LogRhythm cannot be reached."""
    message = (
        'Unable to connect to LogRhythm, '
        'validate the configured API endpoint: '
        'https://some_host.logrhythm/lr-search-api/actions/'
        'search-task'
    )
    return {
        'errors': [
            {
                'code': 'connection error',
                'message': message,
                'type': 'fatal'
            }
        ]
    }
@fixture(scope='module')
def authorization_error_expected_relay_response():
    """Expected relay error body for a rejected token."""
    error = {
        'code': 'authorization error',
        'message': 'Authorization failed: Wrong token',
        'type': 'fatal'
    }
    return {'errors': [error]}
| <filename>code/tests/unit/conftest.py
import jwt
from app import app
from pytest import fixture
from http import HTTPStatus
from unittest.mock import MagicMock
from api.errors import INVALID_ARGUMENT
from tests.unit.payloads_for_tests import PRIVATE_KEY
@fixture(scope='session')
def client():
    """Session-wide Flask test client with the RSA signing key installed."""
    app.rsa_private_key = PRIVATE_KEY
    app.testing = True
    with app.test_client() as client:
        yield client
@fixture(scope='session')
def valid_jwt(client):
    """Factory fixture producing RS256-signed JWTs with configurable claims."""
    def _make_jwt(
            token='<PASSWORD>',
            host='some_host.logrhythm',
            jwks_host='visibility.amp.cisco.com',
            aud='http://localhost',
            kid='<KEY>',
            wrong_structure=False,
            wrong_jwks_host=False
    ):
        payload = {
            'token': token,
            'host': host,
            'jwks_host': jwks_host,
            'aud': aud,
        }
        # Optionally drop claims to simulate malformed tokens.
        if wrong_jwks_host:
            del payload['jwks_host']
        if wrong_structure:
            del payload['token']
        return jwt.encode(
            payload, client.application.rsa_private_key, algorithm='RS256',
            headers={'kid': kid}
        )
    return _make_jwt
@fixture(scope='module')
def invalid_json_expected_payload():
    """Factory building the standard invalid-argument error envelope."""
    def _make_message(message):
        error = {
            'code': INVALID_ARGUMENT,
            'message': message,
            'type': 'fatal'
        }
        return {'errors': [error]}
    return _make_message
def mock_api_response(status_code=HTTPStatus.OK, payload=None):
    """Build a MagicMock mimicking a ``requests.Response`` with a JSON body."""
    response = MagicMock()
    response.status_code = status_code
    response.ok = (status_code == HTTPStatus.OK)
    response.json = lambda: payload
    return response
@fixture(scope='module')
def ssl_error_expected_relay_response():
    """Expected relay error body for an SSL verification failure."""
    message = (
        'Unable to verify SSL certificate: '
        'Self signed certificate'
    )
    return {
        'errors': [
            {
                'code': 'unknown',
                'message': message,
                'type': 'fatal'
            }
        ]
    }
@fixture
def mock_exception_for_ssl_error():
    """MagicMock standing in for an SSL error whose
    ``reason.args[...].verify_message`` reports a self-signed certificate.

    NOTE(review): ``__getitem__()`` is configured without an index, so any
    subscript of ``reason.args`` returns the configured mock -- confirm this
    matches how the application unpacks the exception.
    """
    mock_response = MagicMock()
    mock_response.reason.args.__getitem__().verify_message = 'Self signed' \
                                                            ' certificate'
    return mock_response
@fixture(scope='module')
def connection_error_expected_relay_response():
    """Expected relay error body when LogRhythm cannot be reached."""
    message = (
        'Unable to connect to LogRhythm, '
        'validate the configured API endpoint: '
        'https://some_host.logrhythm/lr-search-api/actions/'
        'search-task'
    )
    return {
        'errors': [
            {
                'code': 'connection error',
                'message': message,
                'type': 'fatal'
            }
        ]
    }
@fixture(scope='module')
def authorization_error_expected_relay_response():
    """Expected relay error body for a rejected token."""
    error = {
        'code': 'authorization error',
        'message': 'Authorization failed: Wrong token',
        'type': 'fatal'
    }
    return {'errors': [error]}
| none | 1 | 2.384768 | 2 | |
virtualreality/calibration/camera_calibration.py | sahasam/hobo_vr | 0 | 6616604 | """
calculate camera distortion matrix
saves matrix to camera_matrix.pickle
press s to save image from camera, and q to close camera and start calibration
usage:
pyvr calibrate-cam [options]
Options:
-c, --camera <camera> Source of the camera to use for calibration [default: 0]
"""
import logging
import os
from docopt import docopt
import sys
import pickle
import cv2
import numpy as np
from virtualreality import __version__
images_location = os.path.join(os.path.dirname(__file__), 'calibration_images')
print(images_location)
CHECKERBOARD = (6,9)
def save_images_to_process(cam=0):
    """Save checkerboard frames from the given camera into the
    ``calibration_images`` directory for later processing.

    Press ``s`` to save the current frame (only when a checkerboard is
    detected) and ``q`` to stop; capture also stops after 20 saved images.

    Keyword Arguments:
        cam {int} -- reference to usb camera input (default: {0})
    """
    img_counter = 1
    cap = cv2.VideoCapture(cam)
    while cap.isOpened():
        ret, frame = cap.read()
        # BUG FIX: the frame was previously converted to grayscale *before*
        # checking ``ret``, which crashed on a failed read (frame is None).
        if not ret:
            logging.error("failed to receive image from camera")
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        valid_img, corners = cv2.findChessboardCorners(
            gray, CHECKERBOARD,
            cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
        if valid_img:
            corners2 = cv2.cornerSubPix(
                gray, corners, (11, 11), (-1, -1),
                (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001))
            cv2.drawChessboardCorners(frame, CHECKERBOARD, corners2, True)
        cv2.imshow('drawn_corners', frame)
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q') or img_counter >= 20:
            # press q to stop (or stop automatically after 20 saved images);
            # the dead bare ``exit`` expression that used to sit here was a
            # no-op and has been removed.
            cap.release()
            cv2.destroyAllWindows()
            break
        elif key & 0xFF == ord('s'):
            # press s to save image
            if valid_img:
                print(f"saving image {img_counter}")
                cv2.imwrite(os.path.join(images_location, f"image{img_counter}.jpg"), gray)
                img_counter += 1
            else:
                print("no checkerboard found. Not Saving.")
def calibrate_camera(cam=0):
    """Compute the camera matrix from the saved calibration images and
    pickle it next to this module as ``camera_matrix.pickle``.

    If no calibration images exist yet they are captured first from ``cam``.

    Keyword Arguments:
        cam {int} -- reference to usb camera input (default: {0})
    """
    # Termination criteria for the sub-pixel corner refinement.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    object_points = []
    image_points = []
    obj_p = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
    obj_p[0, :, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
    # Capture images first if none are available yet.
    # BUG FIX: the requested camera is now forwarded (was hard-coded to 0).
    if not os.path.exists(images_location):
        os.makedirs(images_location)
        save_images_to_process(cam)
    elif len(os.listdir(images_location)) == 0:
        save_images_to_process(cam)
    for image_path in os.listdir(images_location):
        input_path = os.path.join(images_location, image_path)
        frame = cv2.imread(input_path)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        valid_img, corners = cv2.findChessboardCorners(
            gray, CHECKERBOARD,
            cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
        if valid_img:
            object_points.append(obj_p)
            # Reuse ``criteria`` instead of repeating the literal tuple.
            corners2 = cv2.cornerSubPix(gray, corners, (11, 13), (-1, -1), criteria)
            image_points.append(corners2)
        else:
            logging.warning(f"could not find pattern in {image_path}. Ignoring")
    # calibrate camera with aggregated object_points and image_points from samples
    # NOTE(review): the image size is hard-coded to 640x480 -- confirm it
    # matches the actual capture resolution.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        object_points, image_points, (640, 480), None, None)
    logging.info("camera matrix: ")
    logging.info(mtx)
    # Use a context manager so the pickle file is closed even on error.
    with open(os.path.join(os.path.dirname(__file__), "camera_matrix.pickle"), 'wb') as camera_calibration_file:
        pickle.dump(mtx, camera_calibration_file)
def main():
    """CLI entry point: parse arguments and run the camera calibration."""
    # allow calling from both python -m and from pyvr:
    # BUG FIX: guard against an empty argv (sys.argv[1] raised IndexError).
    argv = sys.argv[1:]
    if not argv or argv[0] != "calibrate-cam":
        argv = ["calibrate-cam"] + argv
    args = docopt(__doc__, version=f"pyvr version {__version__}", argv=argv)
    cam = int(args["--camera"]) if args["--camera"].isdigit() else args["--camera"]
    # BUG FIX: the parsed camera was previously ignored (hard-coded 0).
    calibrate_camera(cam)
if __name__ == "__main__" :
main() | """
calculate camera distortion matrix
saves matrix to camera_matrix.pickle
press s to save image from camera, and q to close camera and start calibration
usage:
pyvr calibrate-cam [options]
Options:
-c, --camera <camera> Source of the camera to use for calibration [default: 0]
"""
import logging
import os
from docopt import docopt
import sys
import pickle
import cv2
import numpy as np
from virtualreality import __version__
images_location = os.path.join(os.path.dirname(__file__), 'calibration_images')
print(images_location)
CHECKERBOARD = (6,9)
def save_images_to_process(cam=0):
    """Save checkerboard frames from the given camera into the
    ``calibration_images`` directory for later processing.

    Press ``s`` to save the current frame (only when a checkerboard is
    detected) and ``q`` to stop; capture also stops after 20 saved images.

    Keyword Arguments:
        cam {int} -- reference to usb camera input (default: {0})
    """
    img_counter = 1
    cap = cv2.VideoCapture(cam)
    while cap.isOpened():
        ret, frame = cap.read()
        # BUG FIX: the frame was previously converted to grayscale *before*
        # checking ``ret``, which crashed on a failed read (frame is None).
        if not ret:
            logging.error("failed to receive image from camera")
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        valid_img, corners = cv2.findChessboardCorners(
            gray, CHECKERBOARD,
            cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
        if valid_img:
            corners2 = cv2.cornerSubPix(
                gray, corners, (11, 11), (-1, -1),
                (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001))
            cv2.drawChessboardCorners(frame, CHECKERBOARD, corners2, True)
        cv2.imshow('drawn_corners', frame)
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q') or img_counter >= 20:
            # press q to stop (or stop automatically after 20 saved images);
            # the dead bare ``exit`` expression that used to sit here was a
            # no-op and has been removed.
            cap.release()
            cv2.destroyAllWindows()
            break
        elif key & 0xFF == ord('s'):
            # press s to save image
            if valid_img:
                print(f"saving image {img_counter}")
                cv2.imwrite(os.path.join(images_location, f"image{img_counter}.jpg"), gray)
                img_counter += 1
            else:
                print("no checkerboard found. Not Saving.")
def calibrate_camera(cam=0):
    """Compute the camera matrix from the saved calibration images and
    pickle it next to this module as ``camera_matrix.pickle``.

    If no calibration images exist yet they are captured first from ``cam``.

    Keyword Arguments:
        cam {int} -- reference to usb camera input (default: {0})
    """
    # Termination criteria for the sub-pixel corner refinement.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    object_points = []
    image_points = []
    obj_p = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
    obj_p[0, :, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
    # Capture images first if none are available yet.
    # BUG FIX: the requested camera is now forwarded (was hard-coded to 0).
    if not os.path.exists(images_location):
        os.makedirs(images_location)
        save_images_to_process(cam)
    elif len(os.listdir(images_location)) == 0:
        save_images_to_process(cam)
    for image_path in os.listdir(images_location):
        input_path = os.path.join(images_location, image_path)
        frame = cv2.imread(input_path)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        valid_img, corners = cv2.findChessboardCorners(
            gray, CHECKERBOARD,
            cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
        if valid_img:
            object_points.append(obj_p)
            # Reuse ``criteria`` instead of repeating the literal tuple.
            corners2 = cv2.cornerSubPix(gray, corners, (11, 13), (-1, -1), criteria)
            image_points.append(corners2)
        else:
            logging.warning(f"could not find pattern in {image_path}. Ignoring")
    # calibrate camera with aggregated object_points and image_points from samples
    # NOTE(review): the image size is hard-coded to 640x480 -- confirm it
    # matches the actual capture resolution.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        object_points, image_points, (640, 480), None, None)
    logging.info("camera matrix: ")
    logging.info(mtx)
    # Use a context manager so the pickle file is closed even on error.
    with open(os.path.join(os.path.dirname(__file__), "camera_matrix.pickle"), 'wb') as camera_calibration_file:
        pickle.dump(mtx, camera_calibration_file)
def main():
    """CLI entry point: parse arguments and run the camera calibration."""
    # allow calling from both python -m and from pyvr:
    # BUG FIX: guard against an empty argv (sys.argv[1] raised IndexError).
    argv = sys.argv[1:]
    if not argv or argv[0] != "calibrate-cam":
        argv = ["calibrate-cam"] + argv
    args = docopt(__doc__, version=f"pyvr version {__version__}", argv=argv)
    cam = int(args["--camera"]) if args["--camera"].isdigit() else args["--camera"]
    # BUG FIX: the parsed camera was previously ignored (hard-coded 0).
    calibrate_camera(cam)
if __name__ == "__main__" :
main() | en | 0.751637 | calculate camera distortion matrix saves matrix to camera_matrix.pickle press s to save image from camera, and q to close camera and start calibration usage: pyvr calibrate-cam [options] Options: -c, --camera <camera> Source of the camera to use for calibration [default: 0] save images from specified camera to calibration_images directory for processing Keyword Arguments: cam {int} -- reference to usb camera input (default: {0}) # press q to stop # press s to save image calculate camera matrix from precollected images Keyword Arguments: cam {int} -- reference to usb camera input (default: {0}) # Checkerboard dimensions that the method looks for #calibrate camera with aggregated object_points and image_points from samples # allow calling from both python -m and from pyvr: | 2.812136 | 3 |
app/blueprints/api/v1/lesson.py | jnsdrtlf/et | 0 | 6616605 | from flask import Blueprint, jsonify, request, g, redirect, url_for
from sqlalchemy import exists
from app.models import db
from app.models.enums import Status, Role, Weekday
from app.models.lesson import Lesson, LessonSchema
from app.models.time import Time
from app.models.user import User
from app.utils import requires_auth_status, requires_auth
from app.utils.tasks import tasks
bp = Blueprint('lesson_api1', __name__)
lesson_schema = LessonSchema()
lesson_schemas = LessonSchema(many=True)
@bp.route('/', methods=['POST'])
@requires_auth_status()
def lesson_post():
    """POST /
    Create a new `Lesson` for the authenticated tutor.
    :return: redirect to the index page on success, otherwise a JSON object
        with `success` and `reason` fields
    """
    # Only accepted tutors may create lessons.
    if g.session.user.status != Status.accepted or \
            g.session.user.role != Role.tutor:
        return jsonify({'success': False, 'reason': 'forbidden'}), 403
    try:
        weekday = int(request.values.get('weekday', -1))
        time = int(request.values.get('time', -1))
        # Validate that the referenced time slot exists (debug print removed).
        if time == -1 or \
                not db.session.query(exists().where(Time.id == time)).scalar():
            return jsonify({'success': False, 'reason': 'time'}), 401
        if weekday == -1 or weekday not in Weekday.to_list():
            return jsonify({'success': False, 'reason': 'weekday'}), 401
        lesson = Lesson()
        lesson.school_id = g.session.user.school_id
        lesson.tutor_id = g.session.user_id
        lesson.weekday = Weekday(weekday)
        lesson.time_id = time
        db.session.add(lesson)
        db.session.commit()
        tasks.create_events_now(lesson.id)
        return redirect(url_for('index.index'))
    except Exception:
        # BUG FIX: the exception was previously re-raised, which made this
        # error response unreachable and surfaced a 500 to the client.
        return jsonify({'success': False, 'reason': 'other'}), 401
@bp.route('/user/<int:user_id>', methods=['GET'])
@requires_auth()
def get_lesson_by_user(user_id):
    """ GET /user/<int:user_id>
    Return every lesson the given user takes part in, either as one of the
    students or as the tutor.
    :return: `LessonSchema`
    """
    is_student = User.id == user_id
    is_tutor = Lesson.tutor_id == user_id
    query = db.session.query(Lesson).join(User, Lesson.students)
    lessons = query.filter(is_student | is_tutor).all()
    return lesson_schemas.jsonify(lessons)
| from flask import Blueprint, jsonify, request, g, redirect, url_for
from sqlalchemy import exists
from app.models import db
from app.models.enums import Status, Role, Weekday
from app.models.lesson import Lesson, LessonSchema
from app.models.time import Time
from app.models.user import User
from app.utils import requires_auth_status, requires_auth
from app.utils.tasks import tasks
bp = Blueprint('lesson_api1', __name__)
lesson_schema = LessonSchema()
lesson_schemas = LessonSchema(many=True)
@bp.route('/', methods=['POST'])
@requires_auth_status()
def lesson_post():
    """POST /
    Create a new `Lesson` for the authenticated tutor.
    :return: redirect to the index page on success, otherwise a JSON object
        with `success` and `reason` fields
    """
    # Only accepted tutors may create lessons.
    if g.session.user.status != Status.accepted or \
            g.session.user.role != Role.tutor:
        return jsonify({'success': False, 'reason': 'forbidden'}), 403
    try:
        weekday = int(request.values.get('weekday', -1))
        time = int(request.values.get('time', -1))
        # Validate that the referenced time slot exists (debug print removed).
        if time == -1 or \
                not db.session.query(exists().where(Time.id == time)).scalar():
            return jsonify({'success': False, 'reason': 'time'}), 401
        if weekday == -1 or weekday not in Weekday.to_list():
            return jsonify({'success': False, 'reason': 'weekday'}), 401
        lesson = Lesson()
        lesson.school_id = g.session.user.school_id
        lesson.tutor_id = g.session.user_id
        lesson.weekday = Weekday(weekday)
        lesson.time_id = time
        db.session.add(lesson)
        db.session.commit()
        tasks.create_events_now(lesson.id)
        return redirect(url_for('index.index'))
    except Exception:
        # BUG FIX: the exception was previously re-raised, which made this
        # error response unreachable and surfaced a 500 to the client.
        return jsonify({'success': False, 'reason': 'other'}), 401
@bp.route('/user/<int:user_id>', methods=['GET'])
@requires_auth()
def get_lesson_by_user(user_id):
    """ GET /user/<int:user_id>
    Return every lesson the given user takes part in, either as one of the
    students or as the tutor.
    :return: `LessonSchema`
    """
    is_student = User.id == user_id
    is_tutor = Lesson.tutor_id == user_id
    query = db.session.query(Lesson).join(User, Lesson.students)
    lessons = query.filter(is_student | is_tutor).all()
    return lesson_schemas.jsonify(lessons)
| en | 0.602309 | POST / Create new `Lesson :return: JSON object with `success`, `reason` and `redirect` fields #return lesson_schema.jsonify(lesson) GET /user/<int:user_id> Get all lessons of a specific user where user_id is the unique id of the user in question. :return: `LessonSchema` | 2.334549 | 2 |
leetcode/flatten-nested-list-iterator/solution.py | mmcloughlin/problems | 11 | 6616606 | class NestedIterator(object):
    def __init__(self, nestedList):
        """
        Initialize your data structure here.
        :type nestedList: List[NestedInteger]
        """
        # Stack of lists still to be consumed; the top list is drained first.
        self.stack = [nestedList]
def next(self):
"""
:rtype: int
"""
x = self.stack[-1][0].getInteger()
self.stack[-1] = self.stack[-1][1:]
return x
def hasNext(self):
"""
:rtype: bool
"""
while len(self.stack) > 0:
if len(self.stack[-1]) == 0:
self.stack = self.stack[:-1]
continue
if self.stack[-1][0].isInteger():
break
l = self.stack[-1][0].getList()
self.stack[-1] = self.stack[-1][1:]
self.stack.append(l)
return len(self.stack) > 0
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
# class NestedInteger(object):
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
| class NestedIterator(object):
    def __init__(self, nestedList):
        """
        Initialize your data structure here.
        :type nestedList: List[NestedInteger]
        """
        # Stack of lists still to be consumed; the top list is drained first.
        self.stack = [nestedList]
def next(self):
"""
:rtype: int
"""
x = self.stack[-1][0].getInteger()
self.stack[-1] = self.stack[-1][1:]
return x
def hasNext(self):
"""
:rtype: bool
"""
while len(self.stack) > 0:
if len(self.stack[-1]) == 0:
self.stack = self.stack[:-1]
continue
if self.stack[-1][0].isInteger():
break
l = self.stack[-1][0].getList()
self.stack[-1] = self.stack[-1][1:]
self.stack.append(l)
return len(self.stack) > 0
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
# class NestedInteger(object):
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
| en | 0.620733 | Initialize your data structure here. :type nestedList: List[NestedInteger] :rtype: int :rtype: bool # """ # This is the interface that allows for creating nested lists. # You should not implement it, or speculate about its implementation # """ # class NestedInteger(object): # def isInteger(self): # """ # @return True if this NestedInteger holds a single integer, rather than a nested list. # :rtype bool # """ # # def getInteger(self): # """ # @return the single integer that this NestedInteger holds, if it holds a single integer # Return None if this NestedInteger holds a nested list # :rtype int # """ # # def getList(self): # """ # @return the nested list that this NestedInteger holds, if it holds a nested list # Return None if this NestedInteger holds a single integer # :rtype List[NestedInteger] # """ | 4.162476 | 4 |
pesummary/tests/fetch_test.py | pesummary/pesummary | 1 | 6616607 | <reponame>pesummary/pesummary<gh_stars>1-10
# Licensed under an MIT style license -- see LICENSE.md
from pesummary.io import read
from pesummary.core.fetch import download_and_read_file
from pesummary.gw.fetch import fetch_open_samples
import numpy as np
import requests
import os
__author__ = ["<NAME> <<EMAIL>>"]
def test_download_and_read_file():
    """Test that the `pesummary.core.fetch.download_and_read_file` function
    works as expected

    Requires network access: the samples returned by the fetch helper are
    compared against a manually downloaded copy of the same LIGO DCC file.
    """
    data = download_and_read_file(
        "https://dcc.ligo.org/public/0157/P1800370/005/GW170608_GWTC-1.hdf5"
    )
    # Download the same file by hand and read it back for comparison.
    _data = requests.get(
        "https://dcc.ligo.org/public/0157/P1800370/005/GW170608_GWTC-1.hdf5"
    )
    with open("GW170608_posterior_samples.h5", "wb") as f:
        f.write(_data.content)
    data2 = read("GW170608_posterior_samples.h5")
    np.testing.assert_almost_equal(
        np.array(data.samples), np.array(data2.samples)
    )
def test_download_and_keep_file():
    """Test that when the `read_file=False` kwarg is passed to the
    download_and_read_file function the filename is returned

    Requires network access.
    """
    file_name = download_and_read_file(
        "https://dcc.ligo.org/public/0157/P1800370/005/GW170817_GWTC-1.hdf5",
        outdir=".", read_file=False
    )
    # The helper should leave the downloaded file on disk and return its path.
    assert os.path.isfile(file_name)
def test_fetch_tarball_and_keep():
"""Test that the `pesummary.gw.fetch.fetch_open_samples` function is able to
fetch, unpack and keep a tarball
"""
directory_name = fetch_open_samples(
"GW190424_180648", read_file=False, outdir=".", unpack=True
)
assert os.path.isdir("./GW190424_180648")
assert os.path.isdir(directory_name)
def test_fetch_tarball_and_keep_single_file():
"""Test that the `pesummary.gw.fetch.fetch_open_samples` function is able to
fetch, unpack and keep a single file stored in a tarball
"""
file_name = fetch_open_samples(
"GW190424_180648", read_file=False, outdir=".", unpack=True,
path="GW190424_180648.h5"
)
assert os.path.isfile("./GW190424_180648.h5")
assert os.path.isfile(file_name)
def test_fetch_and_open_tarball():
"""Test that a `pesummary.gw.fetch.fetch_open_samples` function is able to
fetch, unpack and read a single file stored in a tarball
"""
import pesummary.gw.file.formats.pesummary
f = fetch_open_samples(
"GW190424_180648", read_file=True, outdir=".", unpack=True,
path="GW190424_180648.h5"
)
assert isinstance(f, pesummary.gw.file.formats.pesummary.PESummary)
def test_fetch_open_samples():
"""Test that the `pesummary.gw.fetch.fetch_open_samples` function works as
expected
"""
data = fetch_open_samples("GW150914")
_data = requests.get(
"https://dcc.ligo.org/public/0157/P1800370/005/GW150914_GWTC-1.hdf5"
)
with open("GW150914_posterior_samples.h5", "wb") as f:
f.write(_data.content)
data2 = read("GW150914_posterior_samples.h5")
np.testing.assert_almost_equal(
np.array(data.samples), np.array(data2.samples)
)
def test_fetch_open_strain():
"""Test that the `pesummary.gw.fetch.fetch_open_strain` function works as
expected
"""
from pesummary.gw.fetch import fetch_open_strain
from gwpy.timeseries import TimeSeries
data = fetch_open_strain(
"GW190412", IFO="H1", channel="H1:GWOSC-4KHZ_R1_STRAIN",
sampling_rate=4096
)
_data = requests.get(
"https://www.gw-openscience.org/eventapi/html/GWTC-2/GW190412/v3/"
"H-H1_GWOSC_4KHZ_R1-1239082247-32.gwf"
)
with open("H-H1_GWOSC_4KHZ_R1-1239082247-32.gwf", "wb") as f:
f.write(_data.content)
data2 = TimeSeries.read(
"H-H1_GWOSC_4KHZ_R1-1239082247-32.gwf",
channel="H1:GWOSC-4KHZ_R1_STRAIN"
)
np.testing.assert_almost_equal(data.value, data2.value)
np.testing.assert_almost_equal(data.times.value, data2.times.value)
| # Licensed under an MIT style license -- see LICENSE.md
from pesummary.io import read
from pesummary.core.fetch import download_and_read_file
from pesummary.gw.fetch import fetch_open_samples
import numpy as np
import requests
import os
__author__ = ["<NAME> <<EMAIL>>"]
def test_download_and_read_file():
"""Test that the `pesummary.core.fetch.download_and_read_file` function
works as expected
"""
data = download_and_read_file(
"https://dcc.ligo.org/public/0157/P1800370/005/GW170608_GWTC-1.hdf5"
)
_data = requests.get(
"https://dcc.ligo.org/public/0157/P1800370/005/GW170608_GWTC-1.hdf5"
)
with open("GW170608_posterior_samples.h5", "wb") as f:
f.write(_data.content)
data2 = read("GW170608_posterior_samples.h5")
np.testing.assert_almost_equal(
np.array(data.samples), np.array(data2.samples)
)
def test_download_and_keep_file():
"""Test that when the `read=False` kwarg is passed to the
download_and_read_file function the filename is returned
"""
file_name = download_and_read_file(
"https://dcc.ligo.org/public/0157/P1800370/005/GW170817_GWTC-1.hdf5",
outdir=".", read_file=False
)
assert os.path.isfile(file_name)
def test_fetch_tarball_and_keep():
"""Test that the `pesummary.gw.fetch.fetch_open_samples` function is able to
fetch, unpack and keep a tarball
"""
directory_name = fetch_open_samples(
"GW190424_180648", read_file=False, outdir=".", unpack=True
)
assert os.path.isdir("./GW190424_180648")
assert os.path.isdir(directory_name)
def test_fetch_tarball_and_keep_single_file():
"""Test that the `pesummary.gw.fetch.fetch_open_samples` function is able to
fetch, unpack and keep a single file stored in a tarball
"""
file_name = fetch_open_samples(
"GW190424_180648", read_file=False, outdir=".", unpack=True,
path="GW190424_180648.h5"
)
assert os.path.isfile("./GW190424_180648.h5")
assert os.path.isfile(file_name)
def test_fetch_and_open_tarball():
"""Test that a `pesummary.gw.fetch.fetch_open_samples` function is able to
fetch, unpack and read a single file stored in a tarball
"""
import pesummary.gw.file.formats.pesummary
f = fetch_open_samples(
"GW190424_180648", read_file=True, outdir=".", unpack=True,
path="GW190424_180648.h5"
)
assert isinstance(f, pesummary.gw.file.formats.pesummary.PESummary)
def test_fetch_open_samples():
"""Test that the `pesummary.gw.fetch.fetch_open_samples` function works as
expected
"""
data = fetch_open_samples("GW150914")
_data = requests.get(
"https://dcc.ligo.org/public/0157/P1800370/005/GW150914_GWTC-1.hdf5"
)
with open("GW150914_posterior_samples.h5", "wb") as f:
f.write(_data.content)
data2 = read("GW150914_posterior_samples.h5")
np.testing.assert_almost_equal(
np.array(data.samples), np.array(data2.samples)
)
def test_fetch_open_strain():
"""Test that the `pesummary.gw.fetch.fetch_open_strain` function works as
expected
"""
from pesummary.gw.fetch import fetch_open_strain
from gwpy.timeseries import TimeSeries
data = fetch_open_strain(
"GW190412", IFO="H1", channel="H1:GWOSC-4KHZ_R1_STRAIN",
sampling_rate=4096
)
_data = requests.get(
"https://www.gw-openscience.org/eventapi/html/GWTC-2/GW190412/v3/"
"H-H1_GWOSC_4KHZ_R1-1239082247-32.gwf"
)
with open("H-H1_GWOSC_4KHZ_R1-1239082247-32.gwf", "wb") as f:
f.write(_data.content)
data2 = TimeSeries.read(
"H-H1_GWOSC_4KHZ_R1-1239082247-32.gwf",
channel="H1:GWOSC-4KHZ_R1_STRAIN"
)
np.testing.assert_almost_equal(data.value, data2.value)
np.testing.assert_almost_equal(data.times.value, data2.times.value) | en | 0.817726 | # Licensed under an MIT style license -- see LICENSE.md Test that the `pesummary.core.fetch.download_and_read_file` function works as expected Test that when the `read=False` kwarg is passed to the download_and_read_file function the filename is returned Test that the `pesummary.gw.fetch.fetch_open_samples` function is able to fetch, unpack and keep a tarball Test that the `pesummary.gw.fetch.fetch_open_samples` function is able to fetch, unpack and keep a single file stored in a tarball Test that a `pesummary.gw.fetch.fetch_open_samples` function is able to fetch, unpack and read a single file stored in a tarball Test that the `pesummary.gw.fetch.fetch_open_samples` function works as expected Test that the `pesummary.gw.fetch.fetch_open_strain` function works as expected | 2.476847 | 2 |
system trading/yield_dividend.py | nosy0411/Financial_Engineering | 0 | 6616608 | from bs4 import BeautifulSoup
import requests
from datetime import datetime
def get_dividend_yield(code): # 현재 DPS를 통한 예상 배당수익률
url = "http://companyinfo.stock.naver.com/company/c1010001.aspx?cmp_cd=" + code
html = requests.get(url).text
soup = BeautifulSoup(html, 'html5lib')
dt_data = soup.select("td dl dt")
# print(dt_data)
dividend_yield = dt_data[-2].text
dividend_yield = dividend_yield.split(' ')[1]
dividend_yield = dividend_yield[:-1]
return dividend_yield
def get_current_3years_treasury(): # 일별 3년 만기 국채 수익률 가져오는 것,
url = "https://finance.naver.com/marketindex/interestDailyQuote.nhn?marketindexCd=IRR_GOVT03Y"
html = requests.get(url).text
soup = BeautifulSoup(html, 'html5lib')
td_data = soup.select("tr td")
return td_data[1].text
def get_3years_treasury(): # 3년 만기 국채 수익률 3개년치, 2019~2017, 다음 해가 되어도 써먹을 수 있도록 동적처리함
url = "http://www.index.go.kr/potal/main/EachDtlPageDetail.do?idx_cd=1073"
html = requests.get(url).text
soup = BeautifulSoup(html, 'html5lib')
td_data = soup.find('table', {'class': 'table_style_2'})
result = soup.select('tr:nth-of-type(1) td')
today_year = datetime.today().year - 1
three_years_3years_treasury = {}
for i in range(10, 7, -1):
three_years_3years_treasury[today_year] = result[i].text
today_year -= 1
return three_years_3years_treasury
def get_previous_dividend_yield(code): # 과거 배당 수익률 데이터 3년치
url = "https://finance.naver.com/item/main.nhn?code=" + code
html = requests.get(url).text
soup = BeautifulSoup(html, 'html5lib')
td_data = soup.find('table', {'class': 'tb_type1 tb_num tb_type1_ifrs'})
td_data = td_data.select('tr:nth-of-type(15) td')
previous_3years_dividend_yield = {}
today_year = datetime.today().year - 1
for i in range(2,-1,-1):
previous_3years_dividend_yield[today_year] = td_data[i].text.strip()
today_year -= 1
return previous_3years_dividend_yield
def calculate_estimated_dividend_to_treasury(code): #최신 국채시가배당률 계산
today_year = datetime.today().year -1
if(get_dividend_yield(code)==""):
estimated_dividend_yield=0
elif (float(get_dividend_yield(code))==0): #배당수익률 최신 지표가 없는 경우, 과거 배당수익률을 가져옴
yield_history=get_previous_dividend_yield(code)
if(yield_history[today_year]=="" or yield_history[today_year]=="-"):
estimated_dividend_yield=0
else:
estimated_dividend_yield=float(yield_history[today_year])
else: #배당수익률 최신 지표가 있는 경우, 그걸 가져옴
estimated_dividend_yield=float(get_dividend_yield(code))
current_3year_treasury = float(get_current_3years_treasury())
estimated_dividend_to_treasury = estimated_dividend_yield / current_3year_treasury
return estimated_dividend_to_treasury
if __name__ == "__main__":
print(get_dividend_yield("005930"))
print(get_current_3years_treasury())
print(get_3years_treasury())
print(get_previous_dividend_yield("005930"))
print(calculate_estimated_dividend_to_treasury("005930"))
| from bs4 import BeautifulSoup
import requests
from datetime import datetime
def get_dividend_yield(code): # 현재 DPS를 통한 예상 배당수익률
url = "http://companyinfo.stock.naver.com/company/c1010001.aspx?cmp_cd=" + code
html = requests.get(url).text
soup = BeautifulSoup(html, 'html5lib')
dt_data = soup.select("td dl dt")
# print(dt_data)
dividend_yield = dt_data[-2].text
dividend_yield = dividend_yield.split(' ')[1]
dividend_yield = dividend_yield[:-1]
return dividend_yield
def get_current_3years_treasury(): # 일별 3년 만기 국채 수익률 가져오는 것,
url = "https://finance.naver.com/marketindex/interestDailyQuote.nhn?marketindexCd=IRR_GOVT03Y"
html = requests.get(url).text
soup = BeautifulSoup(html, 'html5lib')
td_data = soup.select("tr td")
return td_data[1].text
def get_3years_treasury(): # 3년 만기 국채 수익률 3개년치, 2019~2017, 다음 해가 되어도 써먹을 수 있도록 동적처리함
url = "http://www.index.go.kr/potal/main/EachDtlPageDetail.do?idx_cd=1073"
html = requests.get(url).text
soup = BeautifulSoup(html, 'html5lib')
td_data = soup.find('table', {'class': 'table_style_2'})
result = soup.select('tr:nth-of-type(1) td')
today_year = datetime.today().year - 1
three_years_3years_treasury = {}
for i in range(10, 7, -1):
three_years_3years_treasury[today_year] = result[i].text
today_year -= 1
return three_years_3years_treasury
def get_previous_dividend_yield(code): # 과거 배당 수익률 데이터 3년치
url = "https://finance.naver.com/item/main.nhn?code=" + code
html = requests.get(url).text
soup = BeautifulSoup(html, 'html5lib')
td_data = soup.find('table', {'class': 'tb_type1 tb_num tb_type1_ifrs'})
td_data = td_data.select('tr:nth-of-type(15) td')
previous_3years_dividend_yield = {}
today_year = datetime.today().year - 1
for i in range(2,-1,-1):
previous_3years_dividend_yield[today_year] = td_data[i].text.strip()
today_year -= 1
return previous_3years_dividend_yield
def calculate_estimated_dividend_to_treasury(code): #최신 국채시가배당률 계산
today_year = datetime.today().year -1
if(get_dividend_yield(code)==""):
estimated_dividend_yield=0
elif (float(get_dividend_yield(code))==0): #배당수익률 최신 지표가 없는 경우, 과거 배당수익률을 가져옴
yield_history=get_previous_dividend_yield(code)
if(yield_history[today_year]=="" or yield_history[today_year]=="-"):
estimated_dividend_yield=0
else:
estimated_dividend_yield=float(yield_history[today_year])
else: #배당수익률 최신 지표가 있는 경우, 그걸 가져옴
estimated_dividend_yield=float(get_dividend_yield(code))
current_3year_treasury = float(get_current_3years_treasury())
estimated_dividend_to_treasury = estimated_dividend_yield / current_3year_treasury
return estimated_dividend_to_treasury
if __name__ == "__main__":
print(get_dividend_yield("005930"))
print(get_current_3years_treasury())
print(get_3years_treasury())
print(get_previous_dividend_yield("005930"))
print(calculate_estimated_dividend_to_treasury("005930"))
| ko | 1.000044 | # 현재 DPS를 통한 예상 배당수익률 # print(dt_data) # 일별 3년 만기 국채 수익률 가져오는 것, # 3년 만기 국채 수익률 3개년치, 2019~2017, 다음 해가 되어도 써먹을 수 있도록 동적처리함 # 과거 배당 수익률 데이터 3년치 #최신 국채시가배당률 계산 #배당수익률 최신 지표가 없는 경우, 과거 배당수익률을 가져옴 #배당수익률 최신 지표가 있는 경우, 그걸 가져옴 | 3.262391 | 3 |
tests/test_polyjuice_check_directory.py | ctsit/Polyjuice | 2 | 6616609 |
import unittest
import os
import os.path
import shutil
from poly_juice.polyjuice import walk_directory
from poly_juice.polyjuice import check_directory
from poly_juice.polyjuice import identify_output
from poly_juice.filch import DicomCaretaker
from poly_juice.lumberjack import Lumberjack
class TestPolyjuiceOutput(unittest.TestCase):
"""
These tests examine polyjuice's check_directory and identify_output
functions. They check that the correct information is used to create a
destination folder for processed images.
"""
def setUp(self):
self.directory = os.path.dirname('tests/testOutput/')
if not os.path.exists(self.directory):
os.makedirs(self.directory)
def test_walk_directory(self):
parent_file = 'tests/testInput'
out_dir = 'tests/testOutput/test_directory'
zip_dir = ''
modifications = {'PatientName': 'Anonymous'}
id_pairs = {'': ''}
dicom_folders = []
log = Lumberjack()
# expects to find 1 folder in pet and 2 folders in mri
# each mri folder has 2 files
expected = ['tests/testOutput/test_directory/103_01_01_2010',
'tests/testOutput/test_directory/101_01_01_2010',
'tests/testOutput/test_directory/102_01_01_2010']
result = walk_directory(parent_file, out_dir, zip_dir, modifications,
id_pairs, dicom_folders, log)
self.assertEqual(expected, result)
def test_check_directory(self):
check_directory('tests/testOutput/test_directory')
result = os.path.exists('tests/testOutput/test_directory')
self.assertTrue(result)
def test_identify_output(self):
editor = DicomCaretaker()
working_file = 'tests/testInput/MRI/101_01_01_2010/1'
out_dir = 'tests/testOutput'
id_pairs = {'': ''}
log = Lumberjack()
expected = 'tests/testOutput/101_01_01_2010/1'
result = identify_output(editor, working_file, out_dir, id_pairs, log)
self.assertEqual(expected, result)
def tearDown(self):
if os.path.exists('tests/testOutput/test_directory'):
shutil.rmtree('tests/testOutput/test_directory')
print("Successfully removed tests/testOutput/test_directory")
if __name__ == "__main__":
unittest.main()
|
import unittest
import os
import os.path
import shutil
from poly_juice.polyjuice import walk_directory
from poly_juice.polyjuice import check_directory
from poly_juice.polyjuice import identify_output
from poly_juice.filch import DicomCaretaker
from poly_juice.lumberjack import Lumberjack
class TestPolyjuiceOutput(unittest.TestCase):
"""
These tests examine polyjuice's check_directory and identify_output
functions. They check that the correct information is used to create a
destination folder for processed images.
"""
def setUp(self):
self.directory = os.path.dirname('tests/testOutput/')
if not os.path.exists(self.directory):
os.makedirs(self.directory)
def test_walk_directory(self):
parent_file = 'tests/testInput'
out_dir = 'tests/testOutput/test_directory'
zip_dir = ''
modifications = {'PatientName': 'Anonymous'}
id_pairs = {'': ''}
dicom_folders = []
log = Lumberjack()
# expects to find 1 folder in pet and 2 folders in mri
# each mri folder has 2 files
expected = ['tests/testOutput/test_directory/103_01_01_2010',
'tests/testOutput/test_directory/101_01_01_2010',
'tests/testOutput/test_directory/102_01_01_2010']
result = walk_directory(parent_file, out_dir, zip_dir, modifications,
id_pairs, dicom_folders, log)
self.assertEqual(expected, result)
def test_check_directory(self):
check_directory('tests/testOutput/test_directory')
result = os.path.exists('tests/testOutput/test_directory')
self.assertTrue(result)
def test_identify_output(self):
editor = DicomCaretaker()
working_file = 'tests/testInput/MRI/101_01_01_2010/1'
out_dir = 'tests/testOutput'
id_pairs = {'': ''}
log = Lumberjack()
expected = 'tests/testOutput/101_01_01_2010/1'
result = identify_output(editor, working_file, out_dir, id_pairs, log)
self.assertEqual(expected, result)
def tearDown(self):
if os.path.exists('tests/testOutput/test_directory'):
shutil.rmtree('tests/testOutput/test_directory')
print("Successfully removed tests/testOutput/test_directory")
if __name__ == "__main__":
unittest.main()
| en | 0.885744 | These tests examine polyjuice's check_directory and identify_output functions. They check that the correct information is used to create a destination folder for processed images. # expects to find 1 folder in pet and 2 folders in mri # each mri folder has 2 files | 2.557778 | 3 |
python/qisys/test/fake_interact.py | vbarbaresi/qibuild | 0 | 6616610 | # Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
class FakeInteract(object):
""" A class to tests code depending on qisys.interact
"""
def __init__(self):
self.answers_type = None
self.answer_index = -1
self._answers = None
self.questions = list()
self.editor = None
@property
def answers(self):
if self._answers is None:
raise Exception("FakeInteract not initialized")
return self._answers
# pylint: disable-msg=E1101
@answers.setter
# pylint: disable-msg=E0102
def answers(self, value):
if isinstance(value, dict):
self.answers_type = "dict"
elif isinstance(value, list):
self.answers_type = "list"
else:
raise Exception("Unknow answer type: " + type(value))
self._answers = value
def find_answer(self, message, choices=None, default=None):
keys = self.answers.keys()
for key in keys:
if key in message.lower():
if not choices:
return self.answers[key]
answer = self.answers[key]
if answer in choices:
return answer
else:
mess = "Would answer %s\n" % answer
mess += "But choices are: %s\n" % choices
raise Exception(mess)
if default is not None:
return default
mess = "Could not find answer for\n :: %s\n" % message
mess += "Known keys are: %s" % ", ".join(keys)
raise Exception(mess)
def ask_choice(self, choices, message, **unused): # pylint: disable=unused-argument
print "::", message
for choice in choices:
print "* ", choice
answer = self._get_answer(message, choices)
print ">", answer
return answer
def ask_yes_no(self, message, default=False):
print "::", message,
if default:
print "(Y/n)"
else:
print "(y/N)"
answer = self._get_answer(message, default=default)
print ">", answer
return answer
def ask_path(self, message):
print "::", message
answer = self._get_answer(message)
print ">", answer
return answer
def ask_string(self, message):
print "::", message
answer = self._get_answer(message)
print ">", answer
return answer
def ask_program(self, message):
print "::", message
answer = self._get_answer(message)
print ">", answer
return answer
def get_editor(self):
return self.editor
def _get_answer(self, message, choices=None, default=None):
question = dict()
question['message'] = message
question['choices'] = choices
question['default'] = default
self.questions.append(question)
if self.answers_type == "dict":
return self.find_answer(message, choices=choices, default=default)
self.answer_index += 1
return self.answers[self.answer_index]
| # Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
class FakeInteract(object):
""" A class to tests code depending on qisys.interact
"""
def __init__(self):
self.answers_type = None
self.answer_index = -1
self._answers = None
self.questions = list()
self.editor = None
@property
def answers(self):
if self._answers is None:
raise Exception("FakeInteract not initialized")
return self._answers
# pylint: disable-msg=E1101
@answers.setter
# pylint: disable-msg=E0102
def answers(self, value):
if isinstance(value, dict):
self.answers_type = "dict"
elif isinstance(value, list):
self.answers_type = "list"
else:
raise Exception("Unknow answer type: " + type(value))
self._answers = value
def find_answer(self, message, choices=None, default=None):
keys = self.answers.keys()
for key in keys:
if key in message.lower():
if not choices:
return self.answers[key]
answer = self.answers[key]
if answer in choices:
return answer
else:
mess = "Would answer %s\n" % answer
mess += "But choices are: %s\n" % choices
raise Exception(mess)
if default is not None:
return default
mess = "Could not find answer for\n :: %s\n" % message
mess += "Known keys are: %s" % ", ".join(keys)
raise Exception(mess)
def ask_choice(self, choices, message, **unused): # pylint: disable=unused-argument
print "::", message
for choice in choices:
print "* ", choice
answer = self._get_answer(message, choices)
print ">", answer
return answer
def ask_yes_no(self, message, default=False):
print "::", message,
if default:
print "(Y/n)"
else:
print "(y/N)"
answer = self._get_answer(message, default=default)
print ">", answer
return answer
def ask_path(self, message):
print "::", message
answer = self._get_answer(message)
print ">", answer
return answer
def ask_string(self, message):
print "::", message
answer = self._get_answer(message)
print ">", answer
return answer
def ask_program(self, message):
print "::", message
answer = self._get_answer(message)
print ">", answer
return answer
def get_editor(self):
return self.editor
def _get_answer(self, message, choices=None, default=None):
question = dict()
question['message'] = message
question['choices'] = choices
question['default'] = default
self.questions.append(question)
if self.answers_type == "dict":
return self.find_answer(message, choices=choices, default=default)
self.answer_index += 1
return self.answers[self.answer_index]
| en | 0.734721 | # Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the COPYING file. A class to tests code depending on qisys.interact # pylint: disable-msg=E1101 # pylint: disable-msg=E0102 # pylint: disable=unused-argument | 2.92341 | 3 |
stories/forms.py | GadirMirzayev/Django-Blog | 1 | 6616611 | from django import forms
from stories.models import *
from stories.utils.validators import mail_validator
class ContactForm(forms.ModelForm):
# full_name = forms.CharField(label='Full name', max_length=127, widget=forms.TextInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'Your name'
# }))
# email = forms.EmailField(label='Email', validators=(mail_validator,),
# widget=forms.EmailInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'Your email'
# }))
class Meta:
model = Contact
fields = (
'full_name',
'email',
'subject',
'message'
)
widgets = {
'full_name': forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Your name'
}),
'email': forms.EmailInput(attrs={
'class': 'form-control',
'placeholder': 'Your email'
}),
'subject': forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Subject'
}),
'message': forms.Textarea(attrs={
'class': 'form-control',
'placeholder': 'Message',
'cols': 50,
})
}
def clean_email(self):
email = self.cleaned_data.get('email')
if not email.endswith('gmail.com'):
raise forms.ValidationError('Daxil edilen email yanliz gmail hesabi olmalidir')
return email
# full_name = forms.CharField(label='Full name', max_length=127, widget=forms.TextInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'Your name'
# }))
# email = forms.EmailField(label='Email', max_length=63, widget=forms.EmailInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'Your email'
# }))
# subject = forms.CharField(label='Subject', max_length=255, widget=forms.EmailInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'Subject'
# }))
# message = forms.CharField(label='Message', widget=forms.Textarea(attrs={
# 'class': 'form-control',
# 'placeholder': 'Message',
# 'cols': 50,
# }))
class StoryForm(forms.ModelForm):
class Meta:
model = Story
fields = (
'title',
'description',
'image',
'author',
'tag',
'category'
)
widgets = {
'title': forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Title'
}),
'description': forms.Textarea(attrs={
'class': 'form-control',
'placeholder': 'Description'
}),
'category': forms.Select(attrs={
'class': 'form-control',
}),
'author': forms.Select(attrs={
'class': 'form-control',
}),
'tag': forms.SelectMultiple(attrs={
'class': 'form-control',
}),
}
class RecipeForm(forms.ModelForm):
class Meta:
model = Recipe
fields = (
'title',
'short_description',
'image',
'description',
'author',
'tag',
'category'
)
widgets = {
'title': forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Title'
}),
'short_description': forms.Textarea(attrs={
'class': 'form-control',
'placeholder': 'Short Description',
}),
'description': forms.Textarea(attrs={
'class': 'form-control',
'placeholder': 'Description'
}),
'category': forms.Select(attrs={
'class': 'form-control',
}),
'author': forms.Select(attrs={
'class': 'form-control',
}),
'tag': forms.SelectMultiple(attrs={
'class': 'form-control',
}),
}
class SubscriberForm(forms.ModelForm):
class Meta:
model = Subscriber
fields = (
'email',
)
widgets = {
'email': forms.EmailInput(attrs={
'placeholder': 'Enter email address',
}),
} | from django import forms
from stories.models import *
from stories.utils.validators import mail_validator
class ContactForm(forms.ModelForm):
# full_name = forms.CharField(label='Full name', max_length=127, widget=forms.TextInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'Your name'
# }))
# email = forms.EmailField(label='Email', validators=(mail_validator,),
# widget=forms.EmailInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'Your email'
# }))
class Meta:
model = Contact
fields = (
'full_name',
'email',
'subject',
'message'
)
widgets = {
'full_name': forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Your name'
}),
'email': forms.EmailInput(attrs={
'class': 'form-control',
'placeholder': 'Your email'
}),
'subject': forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Subject'
}),
'message': forms.Textarea(attrs={
'class': 'form-control',
'placeholder': 'Message',
'cols': 50,
})
}
def clean_email(self):
email = self.cleaned_data.get('email')
if not email.endswith('gmail.com'):
raise forms.ValidationError('Daxil edilen email yanliz gmail hesabi olmalidir')
return email
# full_name = forms.CharField(label='Full name', max_length=127, widget=forms.TextInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'Your name'
# }))
# email = forms.EmailField(label='Email', max_length=63, widget=forms.EmailInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'Your email'
# }))
# subject = forms.CharField(label='Subject', max_length=255, widget=forms.EmailInput(attrs={
# 'class': 'form-control',
# 'placeholder': 'Subject'
# }))
# message = forms.CharField(label='Message', widget=forms.Textarea(attrs={
# 'class': 'form-control',
# 'placeholder': 'Message',
# 'cols': 50,
# }))
class StoryForm(forms.ModelForm):
class Meta:
model = Story
fields = (
'title',
'description',
'image',
'author',
'tag',
'category'
)
widgets = {
'title': forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Title'
}),
'description': forms.Textarea(attrs={
'class': 'form-control',
'placeholder': 'Description'
}),
'category': forms.Select(attrs={
'class': 'form-control',
}),
'author': forms.Select(attrs={
'class': 'form-control',
}),
'tag': forms.SelectMultiple(attrs={
'class': 'form-control',
}),
}
class RecipeForm(forms.ModelForm):
class Meta:
model = Recipe
fields = (
'title',
'short_description',
'image',
'description',
'author',
'tag',
'category'
)
widgets = {
'title': forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Title'
}),
'short_description': forms.Textarea(attrs={
'class': 'form-control',
'placeholder': 'Short Description',
}),
'description': forms.Textarea(attrs={
'class': 'form-control',
'placeholder': 'Description'
}),
'category': forms.Select(attrs={
'class': 'form-control',
}),
'author': forms.Select(attrs={
'class': 'form-control',
}),
'tag': forms.SelectMultiple(attrs={
'class': 'form-control',
}),
}
class SubscriberForm(forms.ModelForm):
class Meta:
model = Subscriber
fields = (
'email',
)
widgets = {
'email': forms.EmailInput(attrs={
'placeholder': 'Enter email address',
}),
} | en | 0.244217 | # full_name = forms.CharField(label='Full name', max_length=127, widget=forms.TextInput(attrs={ # 'class': 'form-control', # 'placeholder': 'Your name' # })) # email = forms.EmailField(label='Email', validators=(mail_validator,), # widget=forms.EmailInput(attrs={ # 'class': 'form-control', # 'placeholder': 'Your email' # })) # full_name = forms.CharField(label='Full name', max_length=127, widget=forms.TextInput(attrs={ # 'class': 'form-control', # 'placeholder': 'Your name' # })) # email = forms.EmailField(label='Email', max_length=63, widget=forms.EmailInput(attrs={ # 'class': 'form-control', # 'placeholder': 'Your email' # })) # subject = forms.CharField(label='Subject', max_length=255, widget=forms.EmailInput(attrs={ # 'class': 'form-control', # 'placeholder': 'Subject' # })) # message = forms.CharField(label='Message', widget=forms.Textarea(attrs={ # 'class': 'form-control', # 'placeholder': 'Message', # 'cols': 50, # })) | 2.454702 | 2 |
scalarmix/bivariate_gaussian.py | openvax/scalarmix | 0 | 6616612 | <reponame>openvax/scalarmix
# Copyright (c) 2018. <NAME> School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from serializable import Serializable
class BivariateGaussian(Serializable):
    """
    Fits one bivariate Gaussian mixture per row of data.

    Each row of the input matrix X is modeled independently as a mixture
    of two univariate Gaussians, estimated with the EM algorithm.
    """
    def __init__(
            self,
            max_iters=200,
            assignment_smoothing=10.0 ** -10,
            mean_bias=0.0,
            variance_bias=10.0 ** -10,
            min_improvement=0.0001,
            numeric_error_checking=True):
        # max_iters: upper bound on EM iterations in fit()
        # assignment_smoothing: added to both densities in the E-step so
        #   the responsibility ratio never becomes 0/0
        # mean_bias / variance_bias: offsets added to the M-step estimates;
        #   variance_bias keeps variances strictly positive
        # min_improvement: minimum relative per-row objective improvement
        #   required to accept updated parameters
        # numeric_error_checking: enable NaN/Inf/range assertions
        self.max_iters = max_iters
        self.assignment_smoothing = assignment_smoothing
        self.mean_bias = mean_bias
        self.variance_bias = variance_bias
        self.min_improvement = min_improvement
        self.numeric_error_checking = numeric_error_checking
        # Fitted parameters, populated by fit(); the trailing underscore
        # follows the scikit-learn convention for learned attributes.
        self.mean_ = None
        self.variance_ = None
        self.cluster_weights_ = None

    def _e_step(self, X, mean, variance, cluster_weights):
        """E-step: posterior probability of each sample belonging to
        component 1, computed row-wise. Returns an array shaped like X."""
        m1, m2 = mean[:, 0], mean[:, 1]
        s1, s2 = variance[:, 0], variance[:, 1]
        w1, w2 = cluster_weights, 1.0 - cluster_weights
        l1 = self.single_gaussian_densities(X, m1, s1)
        if self.numeric_error_checking:
            assert not np.isnan(l1).any()
            assert np.isfinite(l1).all()
        l1 *= w1[:, np.newaxis]
        if self.numeric_error_checking:
            assert not np.isnan(l1).any()
            assert np.isfinite(l1).all()
        # Smoothing keeps the ratio below well-defined when both
        # densities underflow to zero.
        l1 += self.assignment_smoothing
        l2 = self.single_gaussian_densities(X, m2, s2)
        if self.numeric_error_checking:
            assert not np.isnan(l2).any()
            assert np.isfinite(l2).all()
        l2 *= w2[:, np.newaxis]
        if self.numeric_error_checking:
            assert not np.isnan(l2).any()
            assert np.isfinite(l2).all()
        l2 += self.assignment_smoothing
        assignments = l1 / (l1 + l2)
        if self.numeric_error_checking:
            assert not np.isnan(assignments).any()
        return assignments

    def _m_step(self, X, assignments):
        """M-step: re-estimate per-row means, variances and weights from
        the soft assignments produced by the E-step."""
        n_rows, n_cols = X.shape
        assert assignments.shape == (n_rows, n_cols)
        a1 = assignments
        a2 = 1.0 - assignments
        if self.numeric_error_checking:
            assert (a1 >= 0).all(), a1
            assert (a1 <= 1).all(), a1
            assert X.shape == a1.shape
            assert X.shape == a2.shape
        mean = np.empty((len(X), 2), dtype="float64")
        mean.fill(self.mean_bias)
        # BUGFIX: use += so the mean_bias fill above is actually applied
        # (plain assignment silently discarded it); this matches the +=
        # style used in initialize_mixture_params.
        mean[:, 0] += (X * a1).sum(axis=1) / a1.sum(axis=1)
        mean[:, 1] += (X * a2).sum(axis=1) / a2.sum(axis=1)
        assert mean.shape == (n_rows, 2), \
            "Got mu.shape=%s but expected (%d, 2)" % (mean.shape, n_rows)
        m1, m2 = mean[:, 0], mean[:, 1]
        # squared distances to both centers
        diff1 = X - m1[:, np.newaxis]
        diff1 *= diff1
        diff2 = X - m2[:, np.newaxis]
        diff2 *= diff2
        assert diff1.shape == a1.shape
        assert diff2.shape == a2.shape
        # estimate of variance is the weighted average of
        # squared distances from means
        variance = np.empty_like(mean)
        variance.fill(self.variance_bias)
        # BUGFIX: += preserves the variance_bias fill, guaranteeing the
        # strictly-positive variances asserted below even for degenerate
        # rows where all mass collapses onto the mean.
        variance[:, 0] += (diff1 * a1).sum(axis=1) / a1.sum(axis=1)
        variance[:, 1] += (diff2 * a2).sum(axis=1) / a2.sum(axis=1)
        assert variance.shape == (n_rows, 2)
        if self.numeric_error_checking:
            assert (variance > 0).all(), "Found %d/%d sigma values<=0" % (
                (variance <= 0).sum(),
                variance.shape[0] * variance.shape[1])
        # Weight of component 1 = mean responsibility across the row.
        weights = a1.mean(axis=1)
        assert weights.shape == (n_rows,)
        if self.numeric_error_checking:
            assert (weights >= 0).all(), "Found %d/%d weights<0" % (
                (weights < 0).sum(),
                len(weights))
            assert (weights <= 1).all(), "Found %d/%d weights>1" % (
                (weights > 1).sum(),
                len(weights))
        return mean, variance, weights

    def _check_gaussian_params(self, mean, variance, cluster_weights):
        """Validate shapes, positivity and finiteness of a parameter triple."""
        if self.numeric_error_checking:
            n_rows = mean.shape[0]
            assert mean.shape == (n_rows, 2), mean.shape
            assert variance.shape == (n_rows, 2), variance.shape
            assert cluster_weights.shape == (n_rows,), cluster_weights.shape
            assert (variance > 0).all()
            assert np.isfinite(mean).all()
            assert np.isfinite(variance).all()
            assert np.isfinite(cluster_weights).all()

    def single_gaussian_densities(self, X, mean, variance):
        """Per-row Gaussian density of every observation in X."""
        return np.exp(
            self.single_gaussian_log_densities(X, mean, variance))

    def single_gaussian_log_densities(self, X, mean, variance):
        """Per-row Gaussian log-density of every observation in X."""
        n_rows, n_cols = X.shape
        assert mean.shape == (n_rows,)
        assert variance.shape == (n_rows,)
        diff = (X - mean[:, np.newaxis])
        diff *= diff
        z_score = diff / variance[:, np.newaxis]
        normalizer = 1.0 / np.sqrt(2 * np.pi * variance[:, np.newaxis])
        log_normalizer = np.log(normalizer)
        return -0.5 * z_score + log_normalizer

    def mixture_densities(
            self,
            X,
            mean=None,
            variance=None,
            cluster_weights=None):
        """
        Returns Gaussian density of each observation under the
        mean, std, and mixture coefficients for each row.

        Parameters default to the fitted attributes; raises ValueError if
        fit() has not been called and no explicit parameters are supplied.
        """
        if mean is None:
            mean = self.mean_
        if variance is None:
            variance = self.variance_
        if cluster_weights is None:
            cluster_weights = self.cluster_weights_
        if mean is None or variance is None or cluster_weights is None:
            raise ValueError("You must call fit() before log_likelihood()")
        self._check_gaussian_params(mean, variance, cluster_weights)
        n_rows, n_cols = X.shape
        m1, m2 = mean[:, 0], mean[:, 1]
        s1, s2 = variance[:, 0], variance[:, 1]
        w1, w2 = cluster_weights, 1.0 - cluster_weights
        ###
        # Instead of doing w_i * prob(X | m_i, s_i), which involves very small
        # numbers, often beyond the range of floating point,
        # we'll instead take the log of each term initially and then
        # add them exponentiated.
        ###
        log_d1 = self.single_gaussian_log_densities(X, m1, s1)
        log_d1 += np.log(w1)[:, np.newaxis]
        log_d2 = self.single_gaussian_log_densities(X, m2, s2)
        log_d2 += np.log(w2)[:, np.newaxis]
        return np.exp(log_d1) + np.exp(log_d2)

    def log_mixture_densities(self, X, mean=None, variance=None, cluster_weights=None):
        """Element-wise log of mixture_densities()."""
        return np.log(self.mixture_densities(
            X,
            mean=mean,
            variance=variance,
            cluster_weights=cluster_weights))

    def log_likelihood(
            self, X, mean=None, variance=None, cluster_weights=None):
        """Per-row log-likelihood: sum of log mixture densities over columns."""
        log_densities = self.log_mixture_densities(
            X,
            mean=mean,
            variance=variance,
            cluster_weights=cluster_weights)
        return np.sum(log_densities, axis=1)

    def negative_log_likelihood(
            self, X, mean=None, variance=None, cluster_weights=None):
        """Per-row negative log-likelihood."""
        return -self.log_likelihood(
            X, mean=mean, variance=variance, cluster_weights=cluster_weights)

    def normalized_log_likelihood(
            self, X, mean=None, variance=None, cluster_weights=None):
        """Per-row log-likelihood divided by the number of samples per row."""
        n_samples_per_row = X.shape[1]
        log_likelihood = self.log_likelihood(
            X, mean=mean, variance=variance, cluster_weights=cluster_weights)
        return log_likelihood / n_samples_per_row

    def normalized_negative_log_likelihood(
            self, X, mean=None, variance=None, cluster_weights=None):
        """Per-row normalized negative log-likelihood (EM objective)."""
        return -self.normalized_log_likelihood(
            X, mean=mean, variance=variance, cluster_weights=cluster_weights)

    def initialize_mixture_params(self, X):
        """Initialize each row's components by splitting at the row median:
        component 1 models values below the median, component 2 the rest."""
        mean = np.empty((len(X), 2), dtype="float64")
        mean.fill(self.mean_bias)
        variance = np.empty_like(mean)
        variance.fill(self.variance_bias)
        for i in range(len(X)):
            row = X[i, :]
            median = np.median(row)
            mean[i, 0] += np.mean(row[row < median])
            mean[i, 1] += np.mean(row[row >= median])
            variance[i, 0] += np.std(row[row < median]) ** 2
            variance[i, 1] += np.std(row[row >= median]) ** 2
        weights = np.ones(len(X)) * 0.5
        self._check_gaussian_params(mean, variance, weights)
        return mean, variance, weights

    def fit(self, X, verbose=True):
        """Run EM on every row of X until no row improves by more than
        min_improvement (or max_iters is reached). Stores the fitted
        parameters on self and returns the final soft assignments."""
        n_rows, n_cols = X.shape
        mean, variance, cluster_weights = self.initialize_mixture_params(X)
        # Best per-row normalized NLL seen so far; start effectively at +inf.
        best_likelihoods = 10.0 ** 30 * np.ones(n_rows, dtype="float64")
        for iter_number in range(self.max_iters):
            assignments = self._e_step(
                X,
                mean=mean,
                variance=variance,
                cluster_weights=cluster_weights)
            new_mean, new_variance, new_cluster_weights = \
                self._m_step(X, assignments)
            per_row_normalized_neg_log_likelihood = \
                self.normalized_negative_log_likelihood(
                    X,
                    mean=new_mean,
                    variance=new_variance,
                    cluster_weights=new_cluster_weights)
            improvement = (best_likelihoods - per_row_normalized_neg_log_likelihood)
            improvement_fraction = improvement / best_likelihoods
            improved = improvement_fraction > self.min_improvement
            # Accept updated parameters only for rows that improved enough.
            best_likelihoods[improved] = per_row_normalized_neg_log_likelihood[improved]
            mean[improved] = new_mean[improved]
            variance[improved] = new_variance[improved]
            # BUGFIX: was `cluster_weights[improved] = cluster_weights[improved]`,
            # a self-assignment that silently froze the weights at their
            # 0.5 initialization forever.
            cluster_weights[improved] = new_cluster_weights[improved]
            n_improved = improved.sum()
            if verbose:
                # NOTE: the printed value is the normalized *negative*
                # log-likelihood averaged over rows.
                print(
                    "-- Epoch %d: log likelihood mean=%f (%d improved)" % (
                        iter_number + 1,
                        per_row_normalized_neg_log_likelihood.mean(),
                        n_improved))
            if n_improved == 0:
                break
        self.mean_ = mean
        self.variance_ = variance
        # BUGFIX: store on cluster_weights_ (with underscore), the attribute
        # mixture_densities() actually reads; previously self.cluster_weights
        # was set instead, so any density/likelihood call after fit() raised
        # ValueError.
        self.cluster_weights_ = cluster_weights
        return assignments
| # Copyright (c) 2018. <NAME> School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from serializable import Serializable
class BivariateGaussian(Serializable):
    """
    Fits one bivariate Gaussian mixture per row of data
    """
    # Duplicated ("cleaned") copy of the class above; code left untouched,
    # review notes added where behavior looks suspect.
    def __init__(
            self,
            max_iters=200,
            assignment_smoothing=10.0 ** -10,
            mean_bias=0.0,
            variance_bias=10.0 ** -10,
            min_improvement=0.0001,
            numeric_error_checking=True):
        # max_iters: cap on EM iterations; assignment_smoothing avoids 0/0
        # in the E-step; mean_bias/variance_bias are parameter offsets;
        # min_improvement is the relative acceptance threshold in fit().
        self.max_iters = max_iters
        self.assignment_smoothing = assignment_smoothing
        self.mean_bias = mean_bias
        self.variance_bias = variance_bias
        self.min_improvement = min_improvement
        self.numeric_error_checking = numeric_error_checking
        # Fitted parameters, populated by fit()
        self.mean_ = None
        self.variance_ = None
        self.cluster_weights_ = None
    def _e_step(self, X, mean, variance, cluster_weights):
        # E-step: responsibility of component 1 for every sample, per row.
        m1, m2 = mean[:, 0], mean[:, 1]
        s1, s2 = variance[:, 0], variance[:, 1]
        w1, w2 = cluster_weights, 1.0 - cluster_weights
        l1 = self.single_gaussian_densities(X, m1, s1)
        if self.numeric_error_checking:
            assert not np.isnan(l1).any()
            assert np.isfinite(l1).all()
        l1 *= w1[:, np.newaxis]
        if self.numeric_error_checking:
            assert not np.isnan(l1).any()
            assert np.isfinite(l1).all()
        # Smoothing keeps the ratio defined when both densities underflow.
        l1 += self.assignment_smoothing
        l2 = self.single_gaussian_densities(X, m2, s2)
        if self.numeric_error_checking:
            assert not np.isnan(l2).any()
            assert np.isfinite(l2).all()
        l2 *= w2[:, np.newaxis]
        if self.numeric_error_checking:
            assert not np.isnan(l2).any()
            assert np.isfinite(l2).all()
        l2 += self.assignment_smoothing
        assignments = l1 / (l1 + l2)
        if self.numeric_error_checking:
            assert not np.isnan(assignments).any()
        return assignments
    def _m_step(self, X, assignments):
        # M-step: re-estimate means, variances and weights per row.
        n_rows, n_cols = X.shape
        assert assignments.shape == (n_rows, n_cols)
        a1 = assignments
        a2 = 1.0 - assignments
        if self.numeric_error_checking:
            assert (a1 >= 0).all(), a1
            assert (a1 <= 1).all(), a1
            assert X.shape == a1.shape
            assert X.shape == a2.shape
        mean = np.empty((len(X), 2), dtype="float64")
        mean.fill(self.mean_bias)
        # NOTE(review): the fill with mean_bias above is discarded by the
        # plain assignments below; initialize_mixture_params uses += for
        # the same pattern -- confirm whether += was intended here.
        mean[:, 0] = (X * a1).sum(axis=1) / a1.sum(axis=1)
        mean[:, 1] = (X * a2).sum(axis=1) / a2.sum(axis=1)
        assert mean.shape == (n_rows, 2), \
            "Got mu.shape=%s but expected (%d, 2)" % (mean.shape, n_rows)
        m1, m2 = mean[:, 0], mean[:, 1]
        # squared distances to both centers
        diff1 = X - m1[:, np.newaxis]
        diff1 *= diff1
        diff2 = X - m2[:, np.newaxis]
        diff2 *= diff2
        assert diff1.shape == a1.shape
        assert diff2.shape == a2.shape
        # estimate of variance is the weighted average of
        # squared distances from means
        variance = np.empty_like(mean)
        variance.fill(self.variance_bias)
        # NOTE(review): same pattern -- the variance_bias fill is discarded
        # by plain assignment, so the (variance > 0) assertion below can
        # fire on degenerate rows.
        variance[:, 0] = (diff1 * a1).sum(axis=1) / a1.sum(axis=1)
        variance[:, 1] = (diff2 * a2).sum(axis=1) / a2.sum(axis=1)
        assert variance.shape == (n_rows, 2)
        if self.numeric_error_checking:
            assert (variance > 0).all(), "Found %d/%d sigma values<=0" % (
                (variance <= 0).sum(),
                variance.shape[0] * variance.shape[1])
        # Weight of component 1 = mean responsibility across the row.
        weights = a1.mean(axis=1)
        assert weights.shape == (n_rows,)
        if self.numeric_error_checking:
            assert (weights >= 0).all(), "Found %d/%d weights<0" % (
                (weights < 0).sum(),
                len(weights))
            assert (weights <= 1).all(), "Found %d/%d weights>1" % (
                (weights > 1).sum(),
                len(weights))
        return mean, variance, weights
    def _check_gaussian_params(self, mean, variance, cluster_weights):
        # Validate shapes, positivity and finiteness of a parameter triple.
        if self.numeric_error_checking:
            n_rows = mean.shape[0]
            assert mean.shape == (n_rows, 2), mean.shape
            assert variance.shape == (n_rows, 2), variance.shape
            assert cluster_weights.shape == (n_rows,), cluster_weights.shape
            assert (variance > 0).all()
            assert np.isfinite(mean).all()
            assert np.isfinite(variance).all()
            assert np.isfinite(cluster_weights).all()
    def single_gaussian_densities(self, X, mean, variance):
        # Per-row Gaussian density of every observation.
        return np.exp(
            self.single_gaussian_log_densities(X, mean, variance))
    def single_gaussian_log_densities(self, X, mean, variance):
        # Per-row Gaussian log-density of every observation.
        n_rows, n_cols = X.shape
        assert mean.shape == (n_rows,)
        assert variance.shape == (n_rows,)
        diff = (X - mean[:, np.newaxis])
        diff *= diff
        z_score = diff / variance[:, np.newaxis]
        normalizer = 1.0 / np.sqrt(2 * np.pi * variance[:, np.newaxis])
        log_normalizer = np.log(normalizer)
        return -0.5 * z_score + log_normalizer
    def mixture_densities(
            self,
            X,
            mean=None,
            variance=None,
            cluster_weights=None):
        """
        Returns Gaussian density of each observation under the
        mean, std, and mixture coefficients for each row.
        """
        if mean is None:
            mean = self.mean_
        if variance is None:
            variance = self.variance_
        if cluster_weights is None:
            cluster_weights = self.cluster_weights_
        if mean is None or variance is None or cluster_weights is None:
            raise ValueError("You must call fit() before log_likelihood()")
        self._check_gaussian_params(mean, variance, cluster_weights)
        n_rows, n_cols = X.shape
        m1, m2 = mean[:, 0], mean[:, 1]
        s1, s2 = variance[:, 0], variance[:, 1]
        w1, w2 = cluster_weights, 1.0 - cluster_weights
        ###
        # Instead of doing w_i * prob(X | m_i, s_i), which involves very small
        # numbers, often beyond the range of floating point,
        # we'll instead take the log of each term initially and then
        # add them exponentiated.
        ###
        log_d1 = self.single_gaussian_log_densities(X, m1, s1)
        log_d1 += np.log(w1)[:, np.newaxis]
        log_d2 = self.single_gaussian_log_densities(X, m2, s2)
        log_d2 += np.log(w2)[:, np.newaxis]
        return np.exp(log_d1) + np.exp(log_d2)
    def log_mixture_densities(self, X, mean=None, variance=None, cluster_weights=None):
        # Element-wise log of mixture_densities().
        return np.log(self.mixture_densities(
            X,
            mean=mean,
            variance=variance,
            cluster_weights=cluster_weights))
    def log_likelihood(
            self, X, mean=None, variance=None, cluster_weights=None):
        # Per-row log-likelihood: sum of log mixture densities over columns.
        log_densities = self.log_mixture_densities(
            X,
            mean=mean,
            variance=variance,
            cluster_weights=cluster_weights)
        return np.sum(log_densities, axis=1)
    def negative_log_likelihood(
            self, X, mean=None, variance=None, cluster_weights=None):
        # Per-row negative log-likelihood.
        return -self.log_likelihood(
            X, mean=mean, variance=variance, cluster_weights=cluster_weights)
    def normalized_log_likelihood(
            self, X, mean=None, variance=None, cluster_weights=None):
        # Per-row log-likelihood divided by the samples-per-row count.
        n_samples_per_row = X.shape[1]
        log_likelihood = self.log_likelihood(
            X, mean=mean, variance=variance, cluster_weights=cluster_weights)
        return log_likelihood / n_samples_per_row
    def normalized_negative_log_likelihood(
            self, X, mean=None, variance=None, cluster_weights=None):
        # Per-row normalized negative log-likelihood (the EM objective).
        return -self.normalized_log_likelihood(
            X, mean=mean, variance=variance, cluster_weights=cluster_weights)
    def initialize_mixture_params(self, X):
        # Initialize per row by splitting at the median: component 1 models
        # values below the median, component 2 the rest; weights start at 0.5.
        mean = np.empty((len(X), 2), dtype="float64")
        mean.fill(self.mean_bias)
        variance = np.empty_like(mean)
        variance.fill(self.variance_bias)
        for i in range(len(X)):
            row = X[i, :]
            median = np.median(row)
            mean[i, 0] += np.mean(row[row < median])
            mean[i, 1] += np.mean(row[row >= median])
            variance[i, 0] += np.std(row[row < median]) ** 2
            variance[i, 1] += np.std(row[row >= median]) ** 2
        weights = np.ones(len(X)) * 0.5
        self._check_gaussian_params(mean, variance, weights)
        return mean, variance, weights
    def fit(self, X, verbose=True):
        # Run EM on every row of X until no row improves enough or
        # max_iters is reached; stores fitted parameters on self.
        n_rows, n_cols = X.shape
        mean, variance, cluster_weights = self.initialize_mixture_params(X)
        best_likelihoods = 10.0 ** 30 * np.ones(n_rows, dtype="float64")
        for iter_number in range(self.max_iters):
            assignments = self._e_step(
                X,
                mean=mean,
                variance=variance,
                cluster_weights=cluster_weights)
            new_mean, new_variance, new_cluster_weights = \
                self._m_step(X, assignments)
            # print(new_mean[0])
            per_row_normalized_neg_log_likelihood = \
                self.normalized_negative_log_likelihood(
                    X,
                    mean=new_mean,
                    variance=new_variance,
                    cluster_weights=new_cluster_weights)
            improvement = (best_likelihoods - per_row_normalized_neg_log_likelihood)
            improvement_fraction = improvement / best_likelihoods
            improved = improvement_fraction > self.min_improvement
            # best_likelihoods = per_row_normalized_neg_log_likelihood
            # mean = new_mean
            # variance = new_variance
            # cluster_weights = new_cluster_weights
            best_likelihoods[improved] = per_row_normalized_neg_log_likelihood[improved]
            mean[improved] = new_mean[improved]
            variance[improved] = new_variance[improved]
            # NOTE(review): self-assignment below is a no-op -- almost
            # certainly meant new_cluster_weights[improved]; as written the
            # weights stay at their 0.5 initialization forever.
            cluster_weights[improved] = cluster_weights[improved]
            n_improved = improved.sum()
            if verbose:
                # Prints the normalized *negative* log-likelihood mean.
                print(
                    "-- Epoch %d: log likelihood mean=%f (%d improved)" % (
                        iter_number + 1,
                        per_row_normalized_neg_log_likelihood.mean(),
                        n_improved))
            if n_improved == 0:
                break
        self.mean_ = mean
        self.variance_ = variance
        # NOTE(review): mixture_densities() reads self.cluster_weights_
        # (trailing underscore); this stores to a different attribute, so
        # cluster_weights_ stays None after fit() -- confirm and fix.
        self.cluster_weights = cluster_weights
return assignments | en | 0.87943 | # Copyright (c) 2018. <NAME> School of Medicine # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Fits one bivariate Gaussian mixture per row of data # squared distances to both centers # estimate of variance is the weighted average of # squared distances from means Returns Gaussian density of each observation under the mean, std, and mixture coefficients for each row. ### # Instead of doing w_i * prob(X | m_i, s_i), which involves very small # numbers, often beyond the range of floating point, # we'll instead take the log of each term initially and then # add them exponentiated. ### # print(new_mean[0]) # best_likelihoods = per_row_normalized_neg_log_likelihood # mean = new_mean # variance = new_variance # cluster_weights = new_cluster_weights | 2.119516 | 2 |
crawlers/extraction/rota-dos-concursos/draft/crawler_pyspider_rota_concursos_informatica_questions.py | davikawasaki/utfpr-ce-undegrad-final-project | 5 | 6616613 | <filename>crawlers/extraction/rota-dos-concursos/draft/crawler_pyspider_rota_concursos_informatica_questions.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Crawler to extract IT Informatics questions from Rota dos Concursos website.
Created at: 2018-07-21 19:26:12
Project: rota_concursos_informatica-microinformatica
Framework used: PySpider
Classes:
" >>> Accents(object)
" >>> Handler(BaseHandler)
Accents class static methods:
" >>> remove_accents(s)
Handler class methods (in execution sequence flow):
" >>> on_start()
" >>> index_page(response)
" >>> detail_page(response)
todo: accept URL JSON config file
returns: JSON from each crawled page in the web pyspider application
"""
import re
import unicodedata
from pyspider.libs.base_handler import *
# Base site URL and the theme (IT / micro-informatics) whose questions we crawl.
url = 'http://questoes.grancursosonline.com.br/'
theme = 'informatica-microinformatica'
# Landing page listing every exam for the theme.
theme_crawled = url + theme
# Common prefix of every per-exam question page for this theme.
url_pat = url + 'questoes-de-concursos/' + theme + '-'
# Capturing variant of the prefix pattern: (prefix)(remainder).
# NOTE(review): regex_url_rule is never referenced in this file -- confirm
# it is still needed.
regex_url_rule = '(' + url_pat + ')(' + '.+$)'
class Accents(object):
    """Helpers for dealing with accented characters.

    :param object:
    """

    @staticmethod
    def remove_accents(s):
        """Strip accent marks from *s* via NFD decomposition.

        Decomposes each accented character into its base character plus
        combining marks, then drops the marks by encoding to ASCII with
        errors ignored.

        :param s: string possibly containing accented characters
        :return [string] string_noaccents: ASCII-encoded, accent-free
        """
        decomposed = unicodedata.normalize('NFD', s)
        return decomposed.encode('ascii', 'ignore')
class Handler(BaseHandler):
    """PySpider main handler.

    Crawl flow: on_start -> index_page -> detail_page.
    :param BaseHandler (class): pyspider base handler
    """

    @every(minutes=24 * 1)
    def on_start(self):
        """PySpider lifecycle starting method.

        Schedules the theme landing page.
        Next crawling step: self.index_page
        :return:
        """
        self.crawl(theme_crawled, callback=self.index_page)

    @config(age=1 * 24 * 60 * 60)  # pages considered fresh for 1 day
    def index_page(self, response):
        """PySpider lifecycle index page.

        Get link items and crawl each page if match pattern from computer
        science themes.
        Next crawling step: self.detail_page
        :param response:
        :return:
        """
        for each in response.doc('a').items():
            href = each.attr.href
            # Guard: anchors without an href yield None, which would make
            # re.match() raise TypeError.
            if href and re.match(url_pat + '.+$', href):
                # Crawl with JS enabled: the script repeatedly clicks the
                # "load more" button until every question is on the page,
                # then clicks every answer-reveal button.
                self.crawl(href, callback=self.detail_page,
                           fetch_type='js', js_script='''
                           function() {
                               var count = 0;
                               var id = setInterval(function() {
                                   console.log('starting loop',count);
                                   var panel = document.getElementsByClassName('panel panel-default loading-wrapper text-center');
                                   var loadMoreBtn = panel[0].childNodes[1];
                                   var noMoreDiv = panel[0].childNodes[3];
                                   if (loadMoreBtn.attributes.style && loadMoreBtn.attributes.style.nodeValue === "display: none;") {
                                       // No more questions to load
                                   } else {
                                       console.log('clicking new button');
                                       loadMoreBtn.click();
                                   }
                                   count++;
                               }, 500);
                               setTimeout(function() {
                                   clearInterval(id);
                                   var list = document.getElementsByClassName('btn btn-black btn-lg');
                                   for(var i in list) {
                                       list[i].click();
                                   }
                               }, 15000);
                           }
                           ''')

    @config(priority=2)
    def detail_page(self, response):
        """Detail questions page.

        Get questions' text information and append to ext_quest_list from
        output extraction object.
        :param response:
        :return [object] extraction:
        """
        extraction = {
            'theme': response.url.split(theme_crawled)[1],
            'ext_quest_list': []
        }
        # Iterate through all div questions
        for question in response.doc('HTML>BODY>DIV.container>DIV.row>SECTION.prova-detalhes>DIV.questao').items():
            ext_quest = {
                "options": []
            }
            # Get header test name (kept only for "Superior" level exams)
            header_p_list = question('header div.row div.col-a').children()('p')
            if len(header_p_list) == 4:
                if 'Superior' in header_p_list[3].text:
                    ext_quest['test_name'] = header_p_list[0].text
            # Get test question text: plain heading text, or concatenation
            # of its <p> children when present
            body_question = question('div.panel-body div.panel-questao div.panel-heading')
            body_question_inner_p = body_question.children()('p')
            if len(body_question_inner_p) == 0:
                # Re-space sentence-ending periods, leaving decimals
                # (e.g. "1.5") untouched
                ext_quest['question_text'] = re.sub(r'[.]+(?![0-9])', r'. ', body_question.text())
            else:
                question_text = ''
                for body_question_p in body_question_inner_p:
                    if body_question_p.text is not None:
                        if len(body_question_p.getchildren()) == 0:
                            question_text = question_text + body_question_p.text
                        else:
                            question_text = question_text + body_question_p.text_content()
                ext_quest['question_text'] = re.sub(r'[.]+(?![0-9])', r'. ', question_text.replace('\n', ' '))
                if ext_quest['question_text'] == '':
                    ext_quest['question_text'] = body_question.text()
            # Get test question options; the correct one carries the
            # "resposta-correta" CSS class
            body_question_options = question('div.panel-body div.panel-questao div.panel-body ul.list-group')
            inner_options = body_question_options.children()('li')
            for question_option_li in inner_options:
                ext_quest_option = {
                    "correct": False,
                    # Newlines become spaces; leftover double spaces dropped
                    "text": question_option_li.text_content().replace('\n', ' ').replace('  ', '')
                }
                for classe in question_option_li.values():
                    if "resposta-correta" in classe:
                        ext_quest_option['correct'] = True
                ext_quest['options'].append(ext_quest_option)
            # print() with a single argument behaves identically under
            # Python 2 and 3 (the original `print ext_quest` was Py2-only)
            print(ext_quest)
            extraction['ext_quest_list'].append(ext_quest)
        return extraction
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Crawler to extract IT Informatics questions from Rota dos Concursos website.
Created at: 2018-07-21 19:26:12
Project: rota_concursos_informatica-microinformatica
Framework used: PySpider
Classes:
" >>> Accents(object)
" >>> Handler(BaseHandler)
Accents class static methods:
" >>> remove_accents(s)
Handler class methods (in execution sequence flow):
" >>> on_start()
" >>> index_page(response)
" >>> detail_page(response)
todo: accept URL JSON config file
returns: JSON from each crawled page in the web pyspider application
"""
import re
import unicodedata
from pyspider.libs.base_handler import *
# Base site URL and the theme (IT / micro-informatics) whose questions we crawl.
url = 'http://questoes.grancursosonline.com.br/'
theme = 'informatica-microinformatica'
# Landing page listing every exam for the theme.
theme_crawled = url + theme
# Common prefix of every per-exam question page for this theme.
url_pat = url + 'questoes-de-concursos/' + theme + '-'
# Capturing variant of the prefix pattern: (prefix)(remainder).
# NOTE(review): regex_url_rule is never referenced in this file.
regex_url_rule = '(' + url_pat + ')(' + '.+$)'
class Accents(object):
    """Accent/diacritic manipulation utilities.

    :param object:
    """

    @staticmethod
    def remove_accents(s):
        """Return *s* with all accent marks removed.

        NFD normalization splits accented characters into base character
        plus combining marks; the ASCII encode with errors='ignore' then
        discards the marks.

        :param s: input string
        :return [string] string_noaccents: accent-free ASCII encoding
        """
        without_marks = unicodedata.normalize('NFD', s)
        return without_marks.encode('ascii', 'ignore')
class Handler(BaseHandler):
    """PySpider main handler.

    :param BaseHandler (class):
    """
    # Duplicated ("cleaned") copy of the Handler class above; code left
    # untouched, review notes added where behavior looks suspect.
    @every(minutes=24 * 1)
    def on_start(self):
        """PySpider lifecycle starting method.

        Next crawling step: self.index_page
        :return:
        """
        # NOTE(review): @every(minutes=24 * 1) runs every 24 minutes; if a
        # daily schedule was intended this should be 24 * 60 -- confirm.
        self.crawl(theme_crawled, callback=self.index_page)
    @config(age=1 * 24 * 60 * 60)  # pages considered fresh for 1 day
    def index_page(self, response):
        """PySpider lifecycle index page.

        Get link items and crawl each page if match pattern from computer science themes.
        Next crawling step: self.detail_page
        :param response:
        :return:
        """
        # for each in response.doc('div.panel-body > a.list-group-item[href^="http').items():
        for each in response.doc('a').items():
            # If theme match regex pattern for computer science cases, crawl page with JS fn
            # when opening the page to load all questions' answers
            # NOTE(review): each.attr.href is None for anchors without an
            # href, which makes re.match() raise TypeError -- confirm a
            # truthiness guard is needed.
            if re.match(url_pat + '.+$', each.attr.href):
                self.crawl(each.attr.href, callback=self.detail_page,
                           fetch_type='js', js_script='''
                           function() {
                               var count = 0;
                               var id = setInterval(function() {
                                   console.log('starting loop',count);
                                   var panel = document.getElementsByClassName('panel panel-default loading-wrapper text-center');
                                   var loadMoreBtn = panel[0].childNodes[1];
                                   var noMoreDiv = panel[0].childNodes[3];
                                   if (loadMoreBtn.attributes.style && loadMoreBtn.attributes.style.nodeValue === "display: none;") {
                                       // No more questions to load
                                   } else {
                                       console.log('clicking new button');
                                       loadMoreBtn.click();
                                   }
                                   count++;
                               }, 500);
                               setTimeout(function() {
                                   clearInterval(id);
                                   var list = document.getElementsByClassName('btn btn-black btn-lg');
                                   for(var i in list) {
                                       list[i].click();
                                   }
                               }, 15000);
                           }
                           ''')
    @config(priority=2)
    def detail_page(self, response):
        """Detail questions page.

        Get questions' text information and append to ext_quest_list from output extraction object.
        :param response:
        :return [object] extraction:
        """
        # NOTE(review): `count` is incremented but never read.
        count = 0
        extraction = {
            'theme': response.url.split(theme_crawled)[1],
            'ext_quest_list': []
        }
        # Iterate through all div questions
        for question in response.doc('HTML>BODY>DIV.container>DIV.row>SECTION.prova-detalhes>DIV.questao').items():
            ext_quest = {
                "options": []
            }
            # Get header test name (kept only for "Superior" level exams)
            header_p_list = question('header div.row div.col-a').children()('p')
            if len(header_p_list) == 4:
                if 'Superior' in header_p_list[3].text:
                    ext_quest['test_name'] = header_p_list[0].text
            # Get test question text
            body_question = question('div.panel-body div.panel-questao div.panel-heading')
            body_question_inner_p = body_question.children()('p')
            if len(body_question_inner_p) == 0:
                # Re-space sentence periods, leaving decimals (e.g. 1.5) alone
                ext_quest['question_text'] = re.sub(r'[.]+(?![0-9])', r'. ', body_question.text())
            else:
                question_text = ''
                for body_question_p in body_question_inner_p:
                    if body_question_p.text is not None:
                        if len(body_question_p.getchildren()) == 0:
                            question_text = question_text + body_question_p.text
                        else:
                            question_text = question_text + body_question_p.text_content()
                ext_quest['question_text'] = re.sub(r'[.]+(?![0-9])', r'. ', question_text.replace('\n', ' '))
                if ext_quest['question_text'] == '':
                    ext_quest['question_text'] = body_question.text()
            # Get test question options; the right one carries the
            # "resposta-correta" CSS class
            body_question_options = question('div.panel-body div.panel-questao div.panel-body ul.list-group')
            inner_options = body_question_options.children()('li')
            for question_option_li in inner_options:
                ext_quest_option = {
                    "correct": False,
                    # Newlines become spaces; leftover double spaces dropped
                    "text": question_option_li.text_content().replace('\n', ' ').replace('  ', '')
                }
                for classe in question_option_li.values():
                    if "resposta-correta" in classe:
                        ext_quest_option['correct'] = True
                ext_quest['options'].append(ext_quest_option)
            # NOTE(review): Python 2-only print statement; print(ext_quest)
            # would be compatible with both major versions.
            print ext_quest
            count = count + 1
            extraction['ext_quest_list'].append(ext_quest)
return extraction | en | 0.458109 | #!/usr/bin/env python # -*- encoding: utf-8 -*- Crawler to extract IT Informatics questions from Rota dos Concursos website. Created at: 2018-07-21 19:26:12 Project: rota_concursos_informatica-microinformatica Framework used: PySpider Classes: " >>> Accents(object) " >>> Handler(BaseHandler) Accents class static methods: " >>> remove_accents(s) Handler class methods (in execution sequence flow): " >>> on_start() " >>> index_page(response) " >>> detail_page(response) todo: accept URL JSON config file returns: JSON from each crawled page in the web pyspider application Accents class manipulation details. :param object: Remove accents from string. :param s: :return [string] string_noaccents: PySpider main handler. :param BaseHandler (class): PySpider lifecycle starting method. Next crawling step: self.index_page :return: # 10 days period PySpider lifecycle index page. Get link items and crawl each page if match pattern from computer science themes. Next crawling step: self.detail_page :param response: :return: # for each in response.doc('div.panel-body > a.list-group-item[href^="http').items(): # If theme match regex pattern for computer science cases, crawl page with JS fn # when opening the page to load all questions' answers function() { var count = 0; var id = setInterval(function() { console.log('starting loop',count); var panel = document.getElementsByClassName('panel panel-default loading-wrapper text-center'); var loadMoreBtn = panel[0].childNodes[1]; var noMoreDiv = panel[0].childNodes[3]; if (loadMoreBtn.attributes.style && loadMoreBtn.attributes.style.nodeValue === "display: none;") { // No more questions to load } else { console.log('clicking new button'); loadMoreBtn.click(); } count++; }, 500); setTimeout(function() { clearInterval(id); var list = document.getElementsByClassName('btn btn-black btn-lg'); for(var i in list) { list[i].click(); } }, 15000); } Detail questions page. 
Get questions' text information and append to ext_quest_list from output extraction object. :param response: :return [object] extraction: # Iterate through all div questions # Get header test name # Get test question text # Get test question options | 2.321074 | 2 |
english_exam_app/english_exam_app/settings.py | shinji071/english-exam-app | 0 | 6616614 | from .settings_common import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

ALLOWED_HOSTS = [os.environ.get('ALLOWED_HOSTS')]

# Static and media files are served by nginx in production.
# (Removed a dead `MEDIA_ROOT = os.path.join(BASE_DIR, 'media')` assignment
# that was immediately shadowed by the value below.)
STATIC_ROOT = '/usr/share/nginx/html/static'
MEDIA_ROOT = '/usr/share/nginx/html/media'

# Amazon SES credentials, read from the environment (never hard-coded).
AWS_SES_ACCESS_KEY_ID = os.environ.get('AWS_SES_ACCESS_KEY_ID')
AWS_SES_SECRET_ACCESS_KEY = os.environ.get('AWS_SES_SECRET_ACCESS_KEY')

# NOTE(review): the console backend only prints e-mail to stdout; given the
# SES credentials above, a production deployment probably intends an SES
# backend instead -- confirm before relying on outgoing mail.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    # Logger configuration
    'loggers': {
        # Logger used by Django itself
        'django': {
            'handlers': ['file'],
            'level': 'DEBUG',
        },
        # Logger used by the main application
        'main': {
            'handlers': ['file'],
            'level': 'DEBUG',
        },
    },
    # Handler configuration
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'filename': os.path.join(BASE_DIR, 'logs/django.log'),
            'formatter': 'prod',
            'when': 'D',  # rotation interval unit (D = day)
            'interval': 1,  # rotate every 1 day
            'backupCount': 7,  # number of rotated log files to keep
        },
    },
    # Formatter configuration
    'formatters': {
        'prod': {
            'format': '\t'.join([
                '%(asctime)s',
                '[%(levelname)s]',
                '%(pathname)s(Line:%(lineno)d)',
                '%(message)s'
            ])
        },
    }
}
| from .settings_common import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): this first MEDIA_ROOT assignment is dead code -- it is
# shadowed by the nginx path a few lines below.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
ALLOWED_HOSTS = [os.environ.get('ALLOWED_HOSTS')]
# Static and media files are served by nginx in production.
STATIC_ROOT = '/usr/share/nginx/html/static'
MEDIA_ROOT = '/usr/share/nginx/html/media'
# Amazon SES credentials, read from the environment.
AWS_SES_ACCESS_KEY_ID = os.environ.get('AWS_SES_ACCESS_KEY_ID')
AWS_SES_SECRET_ACCESS_KEY = os.environ.get('AWS_SES_SECRET_ACCESS_KEY')
# NOTE(review): the console backend only prints e-mail to stdout -- confirm
# whether an SES backend was intended for production.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    # Logger configuration
    'loggers': {
        # Logger used by Django itself
        'django': {
            'handlers': ['file'],
            'level': 'DEBUG',
        },
        # Logger used by the main application
        'main': {
            'handlers': ['file'],
            'level': 'DEBUG',
        },
    },
    # Handler configuration
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'filename': os.path.join(BASE_DIR, 'logs/django.log'),
            'formatter': 'prod',
            'when': 'D',  # rotation interval unit (D = day)
            'interval': 1,  # rotate every 1 day
            'backupCount': 7,  # number of rotated log files to keep
        },
    },
    # Formatter configuration
    'formatters': {
        'prod': {
            'format': '\t'.join([
                '%(asctime)s',
                '[%(levelname)s]',
                '%(pathname)s(Line:%(lineno)d)',
                '%(message)s'
            ])
        },
    }
}
| ja | 0.997982 | # SECURITY WARNING: don't run with debug turned on in production! # ロガーの設定 # Djangoが利用するロガー # diaryアプリケーションが利用するロガー # ハンドラの設定 # ログローテーション(新しいファイルへの切り替え)間隔の単位(D=日) # ログローテーション間隔(1日単位) # 保存しておくログファイル数 # フォーマッタの設定 | 1.708192 | 2 |
controllers/google_api_controller.py | anggelomos/bulloh | 2 | 6616615 | import datetime
import logging
import os.path
import sys
from typing import List
import requests
from config import ROOT_DIR
from controllers.aws_controller import AWSController
from controllers.general_utilities import GeneralUtilities as gu
from data.constants.row_identifier import RowIdentifier
from googleapiclient.discovery import build
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
class GoogleAPIController:
    """Wrapper around the Google Fit + Google Sheets APIs.

    On construction it downloads the OAuth credential/token files from S3
    (via AWSController), refreshes or (re)creates the token, mirrors it back
    to S3, and loads the whole tracking spreadsheet into
    ``self.bulloh_database``.
    """
    # Local cache directory for the OAuth files; mirrored to/from S3.
    resources_path = ROOT_DIR + "/resources/"
    credentials_file = "credentials_google_api.json"
    token_file = "google_api_token.json"

    def __init__(self):
        # Make sure the local resources directory exists before downloading.
        if not os.path.exists(self.resources_path):
            os.makedirs(self.resources_path)
        self.download_google_token()
        # Scopes: read sleep sessions from Google Fit, read/write Sheets.
        SCOPES = ["https://www.googleapis.com/auth/fitness.sleep.read",
                  "https://www.googleapis.com/auth/spreadsheets"]
        creds = None
        if os.path.exists(self.resources_path + self.token_file):
            creds = Credentials.from_authorized_user_file(self.resources_path + self.token_file, SCOPES)
        if not creds or not creds.valid:
            # Silent refresh when possible, otherwise run the interactive
            # OAuth flow in a local browser.
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(self.resources_path + self.credentials_file, SCOPES)
                creds = flow.run_local_server(port=0)
            # Persist the (new) token locally and mirror it back to S3.
            with open(self.resources_path + self.token_file, "w") as token:
                token.write(creds.to_json())
            self.upload_google_token()
        self.token = creds.token
        sheets_service = build('sheets', 'v4', credentials=creds)
        self.sheets = sheets_service.spreadsheets().values()
        # Google Fit REST endpoint filtered to sleep sessions (activityType=72).
        self.sleep_url = "https://www.googleapis.com/fitness/v1/users/me/sessions?activityType=72"
        self.bulloh_sheet_id = "1HG9e6-tCuh5o9wq-IwKYab-PiFkuzpp06PUk_1KuS4w"
        self.sheet_read_range = "2022!A1:Y367"
        self.bulloh_database = self.get_sheets_data()

    def get_header_token(self) -> dict:
        # Bearer auth header for direct REST calls (Google Fit).
        return {
            "Authorization": f"Bearer {self.token}"
        }

    def download_google_token(self):
        # Pull credentials + token from S3 into the local resources dir.
        logging.info("Downloading Google API token")
        AWSController().download_file(self.resources_path + self.credentials_file, self.credentials_file)
        AWSController().download_file(self.resources_path + self.token_file, self.token_file)

    def upload_google_token(self):
        # Push credentials + token from the local resources dir back to S3.
        logging.info("Uploading Google API token")
        AWSController().upload_file(self.resources_path + self.credentials_file, self.credentials_file)
        AWSController().upload_file(self.resources_path + self.token_file, self.token_file)

    def get_sleep_time(self, current_date: str) -> float:
        """Return hours slept on ``current_date`` according to Google Fit.

        NOTE(review): despite the ``-> float`` annotation this can also
        return ``""`` (no data yet for a recent date); callers appear to
        rely on that sentinel -- confirm before tightening the type.
        """
        logging.info(f"Getting sleep time for {current_date}")
        start_date_milliseconds = gu.get_date_in_milliseconds(current_date)
        end_date_milliseconds = gu.get_date_in_milliseconds(gu.parse_date(current_date) + datetime.timedelta(days=1))
        sleep_sessions = requests.get(self.sleep_url, headers=self.get_header_token()).json()["session"]
        # Keep only sessions that started within the 24h window of the date.
        filtered_sleep_sessions = list(filter(lambda x: int(x["startTimeMillis"]) > start_date_milliseconds and int(x["startTimeMillis"]) < end_date_milliseconds, sleep_sessions))
        sleep_time = ""
        for sleep_session in filtered_sleep_sessions:
            # A session whose name contains "analysis" wins immediately;
            # otherwise the last matching session's duration is kept.
            if "analysis" in sleep_session["name"]:
                return gu.round_number(gu.convert_milliseconds_to_hours(int(sleep_session["endTimeMillis"]) - int(sleep_session["startTimeMillis"])))
            sleep_time = gu.round_number(gu.convert_milliseconds_to_hours(int(sleep_session["endTimeMillis"]) - int(sleep_session["startTimeMillis"])))
        # No data and the date is more than 3 days old: record 0 hours
        # instead of leaving the empty-string sentinel.
        if not sleep_time and (datetime.date.today() - gu.parse_date(current_date).date()).days > 3:
            sleep_time = 0
        return sleep_time

    def get_sheets_data(self) -> List[dict]:
        """Read the whole sheet range and return one dict per data row.

        The first row supplies the keys; numeric-looking cells become
        floats; missing trailing cells become "".
        """
        logging.info("Getting data from sheets")
        sheets_raw_data = self.sheets.get(spreadsheetId=self.bulloh_sheet_id,
                                          range=self.sheet_read_range).execute()["values"]
        headers = sheets_raw_data.pop(0)

        def add_headers(row: list):
            row_dict = {}
            for index, header in enumerate(headers):
                try:
                    # Treat a cell with at most one decimal point as numeric.
                    if row[index].replace(".", "", 1).isdigit():
                        row_dict[header] = float(row[index])
                    else:
                        row_dict[header] = row[index]
                except IndexError:
                    # Short rows: pad missing columns with "".
                    row_dict[header] = ""
            return row_dict
        return list(map(add_headers, sheets_raw_data))

    def get_incomplete_dates(self, current_date: str) -> list:
        """Return [year_day, date] pairs for rows not marked completed,
        scanning from the top of the sheet up to ``current_date`` inclusive."""
        logging.info(f"Getting incomplete dates for {current_date}")
        raw_incomplete_dates = []
        for row in self.bulloh_database:
            if row[RowIdentifier.COMPLETED.value] == "FALSE":
                raw_incomplete_dates.append(row)
            if row[RowIdentifier.DATE.value] == current_date:
                break

        def process_incomplete_dates(raw_row: dict) -> list:
            return [int(raw_row[RowIdentifier.YEAR_DAY.value]), raw_row[RowIdentifier.DATE.value]]
        return list(map(process_incomplete_dates, raw_incomplete_dates))

    def update_sheets(self, day: float, data: list):
        """Write ``data`` into columns H..Y of the row for year-day ``day``
        (sheet rows are 1-based with a header row, hence ``day + 1``)."""
        logging.info(f"Updating sheets day {day} with data {data}")
        update_range = f"2022!H{int(day+1)}:Y{int(day+1)}"
        body = {
            "values": [data]
        }
        self.sheets.update(spreadsheetId=self.bulloh_sheet_id, range=update_range,
                           valueInputOption="USER_ENTERED", body=body).execute()
| import datetime
import logging
import os.path
import sys
from typing import List
import requests
from config import ROOT_DIR
from controllers.aws_controller import AWSController
from controllers.general_utilities import GeneralUtilities as gu
from data.constants.row_identifier import RowIdentifier
from googleapiclient.discovery import build
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
class GoogleAPIController:
    """Wrapper around the Google Fit + Google Sheets APIs.

    On construction it downloads the OAuth credential/token files from S3
    (via AWSController), refreshes or (re)creates the token, mirrors it back
    to S3, and loads the whole tracking spreadsheet into
    ``self.bulloh_database``.
    """
    # Local cache directory for the OAuth files; mirrored to/from S3.
    resources_path = ROOT_DIR + "/resources/"
    credentials_file = "credentials_google_api.json"
    token_file = "google_api_token.json"

    def __init__(self):
        # Make sure the local resources directory exists before downloading.
        if not os.path.exists(self.resources_path):
            os.makedirs(self.resources_path)
        self.download_google_token()
        # Scopes: read sleep sessions from Google Fit, read/write Sheets.
        SCOPES = ["https://www.googleapis.com/auth/fitness.sleep.read",
                  "https://www.googleapis.com/auth/spreadsheets"]
        creds = None
        if os.path.exists(self.resources_path + self.token_file):
            creds = Credentials.from_authorized_user_file(self.resources_path + self.token_file, SCOPES)
        if not creds or not creds.valid:
            # Silent refresh when possible, otherwise run the interactive
            # OAuth flow in a local browser.
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(self.resources_path + self.credentials_file, SCOPES)
                creds = flow.run_local_server(port=0)
            # Persist the (new) token locally and mirror it back to S3.
            with open(self.resources_path + self.token_file, "w") as token:
                token.write(creds.to_json())
            self.upload_google_token()
        self.token = creds.token
        sheets_service = build('sheets', 'v4', credentials=creds)
        self.sheets = sheets_service.spreadsheets().values()
        # Google Fit REST endpoint filtered to sleep sessions (activityType=72).
        self.sleep_url = "https://www.googleapis.com/fitness/v1/users/me/sessions?activityType=72"
        self.bulloh_sheet_id = "1HG9e6-tCuh5o9wq-IwKYab-PiFkuzpp06PUk_1KuS4w"
        self.sheet_read_range = "2022!A1:Y367"
        self.bulloh_database = self.get_sheets_data()

    def get_header_token(self) -> dict:
        # Bearer auth header for direct REST calls (Google Fit).
        return {
            "Authorization": f"Bearer {self.token}"
        }

    def download_google_token(self):
        # Pull credentials + token from S3 into the local resources dir.
        logging.info("Downloading Google API token")
        AWSController().download_file(self.resources_path + self.credentials_file, self.credentials_file)
        AWSController().download_file(self.resources_path + self.token_file, self.token_file)

    def upload_google_token(self):
        # Push credentials + token from the local resources dir back to S3.
        logging.info("Uploading Google API token")
        AWSController().upload_file(self.resources_path + self.credentials_file, self.credentials_file)
        AWSController().upload_file(self.resources_path + self.token_file, self.token_file)

    def get_sleep_time(self, current_date: str) -> float:
        """Return hours slept on ``current_date`` according to Google Fit.

        NOTE(review): despite the ``-> float`` annotation this can also
        return ``""`` (no data yet for a recent date); callers appear to
        rely on that sentinel -- confirm before tightening the type.
        """
        logging.info(f"Getting sleep time for {current_date}")
        start_date_milliseconds = gu.get_date_in_milliseconds(current_date)
        end_date_milliseconds = gu.get_date_in_milliseconds(gu.parse_date(current_date) + datetime.timedelta(days=1))
        sleep_sessions = requests.get(self.sleep_url, headers=self.get_header_token()).json()["session"]
        # Keep only sessions that started within the 24h window of the date.
        filtered_sleep_sessions = list(filter(lambda x: int(x["startTimeMillis"]) > start_date_milliseconds and int(x["startTimeMillis"]) < end_date_milliseconds, sleep_sessions))
        sleep_time = ""
        for sleep_session in filtered_sleep_sessions:
            # A session whose name contains "analysis" wins immediately;
            # otherwise the last matching session's duration is kept.
            if "analysis" in sleep_session["name"]:
                return gu.round_number(gu.convert_milliseconds_to_hours(int(sleep_session["endTimeMillis"]) - int(sleep_session["startTimeMillis"])))
            sleep_time = gu.round_number(gu.convert_milliseconds_to_hours(int(sleep_session["endTimeMillis"]) - int(sleep_session["startTimeMillis"])))
        # No data and the date is more than 3 days old: record 0 hours
        # instead of leaving the empty-string sentinel.
        if not sleep_time and (datetime.date.today() - gu.parse_date(current_date).date()).days > 3:
            sleep_time = 0
        return sleep_time

    def get_sheets_data(self) -> List[dict]:
        """Read the whole sheet range and return one dict per data row.

        The first row supplies the keys; numeric-looking cells become
        floats; missing trailing cells become "".
        """
        logging.info("Getting data from sheets")
        sheets_raw_data = self.sheets.get(spreadsheetId=self.bulloh_sheet_id,
                                          range=self.sheet_read_range).execute()["values"]
        headers = sheets_raw_data.pop(0)

        def add_headers(row: list):
            row_dict = {}
            for index, header in enumerate(headers):
                try:
                    # Treat a cell with at most one decimal point as numeric.
                    if row[index].replace(".", "", 1).isdigit():
                        row_dict[header] = float(row[index])
                    else:
                        row_dict[header] = row[index]
                except IndexError:
                    # Short rows: pad missing columns with "".
                    row_dict[header] = ""
            return row_dict
        return list(map(add_headers, sheets_raw_data))

    def get_incomplete_dates(self, current_date: str) -> list:
        """Return [year_day, date] pairs for rows not marked completed,
        scanning from the top of the sheet up to ``current_date`` inclusive."""
        logging.info(f"Getting incomplete dates for {current_date}")
        raw_incomplete_dates = []
        for row in self.bulloh_database:
            if row[RowIdentifier.COMPLETED.value] == "FALSE":
                raw_incomplete_dates.append(row)
            if row[RowIdentifier.DATE.value] == current_date:
                break

        def process_incomplete_dates(raw_row: dict) -> list:
            return [int(raw_row[RowIdentifier.YEAR_DAY.value]), raw_row[RowIdentifier.DATE.value]]
        return list(map(process_incomplete_dates, raw_incomplete_dates))

    def update_sheets(self, day: float, data: list):
        """Write ``data`` into columns H..Y of the row for year-day ``day``
        (sheet rows are 1-based with a header row, hence ``day + 1``)."""
        logging.info(f"Updating sheets day {day} with data {data}")
        update_range = f"2022!H{int(day+1)}:Y{int(day+1)}"
        body = {
            "values": [data]
        }
        self.sheets.update(spreadsheetId=self.bulloh_sheet_id, range=update_range,
                           valueInputOption="USER_ENTERED", body=body).execute()
| none | 1 | 2.313201 | 2 | |
incertitude.py | ClipCard/incertitude | 0 | 6616616 |
"""incertitude geonames loader is a working example of creating a simple
geocoder for possibly ambiguous input queries.
Usage:
incertitude.py <mapping_file.json> <geonames_file_path> <settings_file.json> <host:port>
incertitude.py (-h | --help)
incertitude.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
import csv
import json
from docopt import docopt
from elasticsearch import Elasticsearch
if __name__ == '__main__':
    arguments = docopt(__doc__, version='incertitude geonames 1.0')
    db_path = arguments["<geonames_file_path>"]
    mapping_file = arguments["<mapping_file.json>"]
    settings_file = arguments["<settings_file.json>"]
    host, port = arguments["<host:port>"].split(":")
    # BUG FIX: the Python 2 builtin ``file()`` no longer exists in Python 3;
    # use ``open()`` so the loader runs on modern interpreters.
    with open(mapping_file) as f:
        mapping = json.load(f)
    with open(settings_file) as f:
        settings = json.load(f)
    es = Elasticsearch([{"host": host, "port": port}])
    body = {
        "settings": settings,
        "mappings": mapping
    }
    # Recreate the index from scratch: ignore 404 on delete (index missing)
    # and 400 on create (index already exists).
    es.indices.delete(index='geocode', ignore=404)
    es.indices.create(index='geocode', ignore=400, body=body)
    # GeoNames dumps are tab-separated; relevant 0-based columns:
    # 0=id, 1=name, 4=lat, 5=lng, 8=country code, 10=admin1 (state), 14=population
    with open(db_path) as db:
        reader = csv.reader(db, 'excel-tab')
        for row in reader:
            _id = row[0]
            name = row[1]
            country = row[8]
            lat = row[4]
            lng = row[5]
            state = row[10]
            population = row[14]
            data = {
                "name": name + " " + state,
                "country": country,
                "population": population,
                "location": {
                    "lat": lat,
                    "lon": lng
                }
            }
            es.index(index="geocode", doc_type="place", id=_id, body=data)
|
"""incertitude geonames loader is a working example of creating a simple
geocoder for possibly ambiguous input queries.
Usage:
incertitude.py <mapping_file.json> <geonames_file_path> <settings_file.json> <host:port>
incertitude.py (-h | --help)
incertitude.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
import csv
import json
from docopt import docopt
from elasticsearch import Elasticsearch
if __name__ == '__main__':
    arguments = docopt(__doc__, version='incertitude geonames 1.0')
    db_path = arguments["<geonames_file_path>"]
    mapping_file = arguments["<mapping_file.json>"]
    settings_file = arguments["<settings_file.json>"]
    host, port = arguments["<host:port>"].split(":")
    # BUG FIX: the Python 2 builtin ``file()`` no longer exists in Python 3;
    # use ``open()`` so the loader runs on modern interpreters.
    with open(mapping_file) as f:
        mapping = json.load(f)
    with open(settings_file) as f:
        settings = json.load(f)
    es = Elasticsearch([{"host": host, "port": port}])
    body = {
        "settings": settings,
        "mappings": mapping
    }
    # Recreate the index from scratch: ignore 404 on delete (index missing)
    # and 400 on create (index already exists).
    es.indices.delete(index='geocode', ignore=404)
    es.indices.create(index='geocode', ignore=400, body=body)
    # GeoNames dumps are tab-separated; relevant 0-based columns:
    # 0=id, 1=name, 4=lat, 5=lng, 8=country code, 10=admin1 (state), 14=population
    with open(db_path) as db:
        reader = csv.reader(db, 'excel-tab')
        for row in reader:
            _id = row[0]
            name = row[1]
            country = row[8]
            lat = row[4]
            lng = row[5]
            state = row[10]
            population = row[14]
            data = {
                "name": name + " " + state,
                "country": country,
                "population": population,
                "location": {
                    "lat": lat,
                    "lon": lng
                }
            }
            es.index(index="geocode", doc_type="place", id=_id, body=data)
| en | 0.434498 | incertitude geonames loader is a working example of creating a simple geocoder for possibly ambiguous input queries. Usage: incertitude.py <mapping_file.json> <geonames_file_path> <settings_file.json> <host:port> incertitude.py (-h | --help) incertitude.py --version Options: -h --help Show this screen. --version Show version. # create an index in elasticsearch, ignore status code 400 (index already exists) | 3.119868 | 3 |
cogs/utils/chess_utils.py | ChaoticNebula5/Chintu-Bot | 19 | 6616617 | <filename>cogs/utils/chess_utils.py
import io
import cairosvg
import chess
import chess.pgn
import chess.svg
import discord
from abc import ABC
"""
sus={'id': 'WXyKWw1q',
'variant': {'key': 'standard', 'name': 'Standard', 'short': 'Std'},
'speed': 'correspondence',
'perf': 'correspondence',
'rated': False,
'initialFen': 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1',
'fen': 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1',
'player': 'white',
'turns': 0,
'startedAtTurn': 0,
'source': 'ai',
'status': {'id': 20, 'name': 'started'},
'createdAt': 1627135755014
}
"""
class ChessUtils:
    """Helpers for pulling games from a Lichess client and rendering
    chess positions as PNG images / Discord embeds."""

    def __init__(self, client):
        self.client = client

    def import_game_by_id(self, game_id: str):
        """Fetch a single game by id and parse its PGN."""
        pgn_text = str(self.client.games.export(game_id, as_pgn=True))
        return chess.pgn.read_game(io.StringIO(pgn_text))

    def import_last_game_by_player(self, player_name: str):
        """Fetch and parse the most recent game of ``player_name``."""
        exported = list(self.client.games.export_by_player(player_name, max=1, as_pgn=True))
        return chess.pgn.read_game(io.StringIO(exported[0]))

    def save_moves_as_png(self, game: chess.pgn.GameNode):
        """Write one PNG per mainline position (move1.png, move2.png, ...)."""
        for ply, position in enumerate(game.mainline(), start=1):
            self.save_position_as_png(position.board(), ply)

    def create_ai_game(self, level: int, color):
        """Challenge the Lichess AI; the level is clamped to [0, 8]."""
        clamped = max(0, min(8, level))
        return self.client.challenges.create_ai(level=clamped, color=color)

    @classmethod
    def save_position_as_png(cls, board: chess.Board, ply: int):
        """Render ``board`` to SVG and convert it to move<ply>.png."""
        svg_markup = chess.svg.board(board=board)
        cairosvg.svg2png(file_obj=io.StringIO(str(svg_markup)), write_to=f"move{ply}.png")

    @classmethod
    def create_position_embed(cls, game: chess.pgn.Game, board: chess.Board, end=False):
        """Build a Discord embed + PNG attachment for ``board`` using the
        game's headers; appends an end-of-game marker when ``end`` is True."""
        svg_markup = chess.svg.board(board)
        png_data = cairosvg.svg2png(file_obj=io.StringIO(str(svg_markup)))
        attachment = discord.File(io.BytesIO(png_data), filename="chess.png")
        embed = discord.Embed(
            title=f":white_circle: {game.headers['White']}({game.headers['WhiteElo']}) vs. "
                  f":black_circle: {game.headers['Black']}({game.headers['BlackElo']})",
            description=f"Event: {game.headers['Event']} | Result: {game.headers['Result']}{' | End of game' if end else ''}"
        )
        return embed, attachment

    @classmethod
    def render_board(cls, board: chess.Board, headers):
        """Build a Discord embed + PNG attachment for ``board`` from a
        plain ``headers`` mapping."""
        svg_markup = chess.svg.board(board)
        png_data = cairosvg.svg2png(file_obj=io.StringIO(str(svg_markup)))
        attachment = discord.File(io.BytesIO(png_data), filename="chess.png")
        embed = discord.Embed(
            title=f":white_circle: {headers['White']}({headers['WhiteElo']}) vs. "
                  f":black_circle: {headers['Black']}({headers['BlackElo']})",
            description=f"Event: {headers['Event']} | Result: {headers['Result']}"
        )
        return embed, attachment
| <filename>cogs/utils/chess_utils.py
import io
import cairosvg
import chess
import chess.pgn
import chess.svg
import discord
from abc import ABC
"""
sus={'id': 'WXyKWw1q',
'variant': {'key': 'standard', 'name': 'Standard', 'short': 'Std'},
'speed': 'correspondence',
'perf': 'correspondence',
'rated': False,
'initialFen': 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1',
'fen': 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1',
'player': 'white',
'turns': 0,
'startedAtTurn': 0,
'source': 'ai',
'status': {'id': 20, 'name': 'started'},
'createdAt': 1627135755014
}
"""
class ChessUtils:
    """Helpers for pulling games from a Lichess client and rendering
    chess positions as PNG images / Discord embeds."""

    def __init__(self, client):
        self.client = client

    def import_game_by_id(self, game_id: str):
        """Fetch a single game by id and parse its PGN."""
        pgn_text = str(self.client.games.export(game_id, as_pgn=True))
        return chess.pgn.read_game(io.StringIO(pgn_text))

    def import_last_game_by_player(self, player_name: str):
        """Fetch and parse the most recent game of ``player_name``."""
        exported = list(self.client.games.export_by_player(player_name, max=1, as_pgn=True))
        return chess.pgn.read_game(io.StringIO(exported[0]))

    def save_moves_as_png(self, game: chess.pgn.GameNode):
        """Write one PNG per mainline position (move1.png, move2.png, ...)."""
        for ply, position in enumerate(game.mainline(), start=1):
            self.save_position_as_png(position.board(), ply)

    def create_ai_game(self, level: int, color):
        """Challenge the Lichess AI; the level is clamped to [0, 8]."""
        clamped = max(0, min(8, level))
        return self.client.challenges.create_ai(level=clamped, color=color)

    @classmethod
    def save_position_as_png(cls, board: chess.Board, ply: int):
        """Render ``board`` to SVG and convert it to move<ply>.png."""
        svg_markup = chess.svg.board(board=board)
        cairosvg.svg2png(file_obj=io.StringIO(str(svg_markup)), write_to=f"move{ply}.png")

    @classmethod
    def create_position_embed(cls, game: chess.pgn.Game, board: chess.Board, end=False):
        """Build a Discord embed + PNG attachment for ``board`` using the
        game's headers; appends an end-of-game marker when ``end`` is True."""
        svg_markup = chess.svg.board(board)
        png_data = cairosvg.svg2png(file_obj=io.StringIO(str(svg_markup)))
        attachment = discord.File(io.BytesIO(png_data), filename="chess.png")
        embed = discord.Embed(
            title=f":white_circle: {game.headers['White']}({game.headers['WhiteElo']}) vs. "
                  f":black_circle: {game.headers['Black']}({game.headers['BlackElo']})",
            description=f"Event: {game.headers['Event']} | Result: {game.headers['Result']}{' | End of game' if end else ''}"
        )
        return embed, attachment

    @classmethod
    def render_board(cls, board: chess.Board, headers):
        """Build a Discord embed + PNG attachment for ``board`` from a
        plain ``headers`` mapping."""
        svg_markup = chess.svg.board(board)
        png_data = cairosvg.svg2png(file_obj=io.StringIO(str(svg_markup)))
        attachment = discord.File(io.BytesIO(png_data), filename="chess.png")
        embed = discord.Embed(
            title=f":white_circle: {headers['White']}({headers['WhiteElo']}) vs. "
                  f":black_circle: {headers['Black']}({headers['BlackElo']})",
            description=f"Event: {headers['Event']} | Result: {headers['Result']}"
        )
        return embed, attachment
| en | 0.130621 | sus={'id': 'WXyKWw1q', 'variant': {'key': 'standard', 'name': 'Standard', 'short': 'Std'}, 'speed': 'correspondence', 'perf': 'correspondence', 'rated': False, 'initialFen': 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1', 'fen': 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1', 'player': 'white', 'turns': 0, 'startedAtTurn': 0, 'source': 'ai', 'status': {'id': 20, 'name': 'started'}, 'createdAt': 1627135755014 } | 2.756179 | 3 |
loss.py | terry97-guel/Naive-Motor2Pos | 0 | 6616618 | #%%
import torch
def Pos_norm2(output, label):
    """Mean-squared-error (squared L2) loss between prediction and target.

    Args:
        output: predicted tensor.
        label: target tensor, broadcastable to ``output``'s shape.

    Returns:
        Scalar tensor: mean of elementwise squared differences.
    """
    # Functional form avoids constructing a Module instance on every call;
    # identical result to torch.nn.MSELoss()(output, label).
    return torch.nn.functional.mse_loss(output, label)
| #%%
import torch
def Pos_norm2(output, label):
    """Mean-squared-error (squared L2) loss between prediction and target.

    Args:
        output: predicted tensor.
        label: target tensor, broadcastable to ``output``'s shape.

    Returns:
        Scalar tensor: mean of elementwise squared differences.
    """
    # Functional form avoids constructing a Module instance on every call;
    # identical result to torch.nn.MSELoss()(output, label).
    return torch.nn.functional.mse_loss(output, label)
| none | 1 | 2.521081 | 3 | |
src/features/get_features.py | stoensin/Match-R-CNN-Repoduction | 1 | 6616619 | import argparse
import os
import pickle
from collections import defaultdict
from os import listdir
from pathlib import Path
import cv2
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.model_zoo import model_zoo
from detectron2.modeling import build_model
import torch
from tqdm import tqdm
def get_features(images_list, model):
    """Run a Detectron2 Mask R-CNN forward pass and return the ROI-pooled
    mask features together with the predicted instances."""
    with torch.no_grad():
        batch = model.preprocess_image(images_list)
        backbone_features = model.backbone(batch.tensor)
        proposals, _ = model.proposal_generator(batch, backbone_features)
        instances, _ = model.roi_heads(batch, backbone_features, proposals)
        levels = [backbone_features[name] for name in model.roi_heads.in_features]
        pooled = model.roi_heads.mask_pooler(levels, [inst.pred_boxes for inst in instances])
    return pooled, instances
# def get_features(images_list, model):
# with torch.no_grad():
# images = model.preprocess_image(images_list) # don't forget to preprocess
# features = model.backbone(images.tensor) # set of cnn features
# proposals, _ = model.proposal_generator(images, features, None) # RPN
#
# features_ = [features[f] for f in model.roi_heads.box_in_features]
# box_features = model.roi_heads.box_pooler(features_, [x.proposal_boxes for x in proposals])
# box_features = model.roi_heads.box_head(box_features) # features of all 1k candidates
# predictions = model.roi_heads.box_predictor(box_features)
# pred_instances, pred_inds = model.roi_heads.box_predictor.inference(predictions, proposals)
# pred_instances = model.roi_heads.forward_with_given_boxes(features, pred_instances)
#
# # output boxes, masks, scores, etc
# pred_instances = model._postprocess(pred_instances, images_list, images.image_sizes) # scale box to orig size
# # features of the proposed boxes
# feats = box_features[pred_inds]
#
# return feats, pred_instances
# def make_features(image_dir, model, target_dir):
# image_list = listdir(image_dir)
#
# with torch.no_grad():
# for img in tqdm(image_list):
# img_path = os.path.join(image_dir, img)
# image = cv2.imread(img_path)
# height, width = image.shape[:2]
# image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
# inputs = [{"image": image, "height": height, "width": width}]
#
# feature, pred_instances = get_features(inputs, model)
#
# # feature_file = open(os.path.join(target_dir, Path(img).stem), 'a')
# # feature_file.write(str(feature.tolist()) + '\n')
# # feature_file.write(str(pred_instances) + '\n')
# # feature_file.close()
# with open(os.path.join(target_dir, Path(img).stem) + '.pkl', 'wb') as f:
# pickle.dump((feature, pred_instances), f)
# f.close()
def make_features(image_dir, model, target_dir):
    """Extract per-instance mask features for every image in ``image_dir``.

    Each detected instance is pickled to ``target_dir`` as
    ``<image-stem>_<i>.pkl`` containing ``(feature_tensor, pred_class)``.
    An index mapping image stem -> [(pickle stem, class), ...] is written to
    data/results/validation_feature_index_class_dict.pkl at the end.
    """
    image_list = listdir(image_dir)
    feature_dict = defaultdict(list)
    with torch.no_grad():
        for img in tqdm(image_list):
            img_path = os.path.join(image_dir, img)
            image = cv2.imread(img_path)
            height, width = image.shape[:2]
            # Detectron2 expects CHW float32 tensors.
            image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
            inputs = [{"image": image, "height": height, "width": width}]
            feature, pred_instances = get_features(inputs, model)
            # enumerate() replaces the manual idx counter of the original.
            for idx, feat in enumerate(feature):
                feat_class = pred_instances[0].get('pred_classes')[idx].to(torch.device("cpu"))
                # ``with`` already closes the file; the explicit close() was redundant.
                with open(os.path.join(target_dir, Path(img).stem) + '_' + str(idx) + '.pkl', 'wb') as f:
                    pickle.dump((feat.to(torch.device("cpu")), feat_class), f)
                feature_dict[Path(img).stem].append((Path(img).stem + '_' + str(idx), feat_class))
    with open(os.path.join('data', 'results', 'validation_feature_index_class_dict.pkl'), 'wb') as f:
        pickle.dump(feature_dict, f)
def main():
    """CLI entry point: load the trained Mask R-CNN checkpoint and dump
    ROI-pooled features for every validation image."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i',
                        '--image_path',
                        help='image dir path',
                        type=str,
                        default=os.path.join('data', 'raw', 'validation', 'image'))
    parser.add_argument('-t',
                        '--target_dir',
                        # BUG FIX: the help text previously duplicated
                        # --image_path's "image dir path".
                        help='output dir path for pooled features',
                        type=str,
                        default=os.path.join('data', 'results', 'pooled_features', 'validation'))
    args = parser.parse_args()
    # Start from the zoo Mask R-CNN R50-FPN config, then override with the
    # locally trained weights (13 classes).
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file('COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))
    cfg.MODEL.WEIGHTS = os.path.join('output', 'model_final.pth')
    cfg.SOLVER.IMS_PER_BATCH = 1
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 13
    cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.7]
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
    cfg.INPUT.MIN_SIZE_TEST = 800
    model = build_model(cfg)
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    # Inference mode: disables training-time behaviour.
    model.eval()
    make_features(args.image_path, model, args.target_dir)
if __name__ == '__main__':
    main()
| import argparse
import os
import pickle
from collections import defaultdict
from os import listdir
from pathlib import Path
import cv2
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.model_zoo import model_zoo
from detectron2.modeling import build_model
import torch
from tqdm import tqdm
def get_features(images_list, model):
    """Run a Detectron2 Mask R-CNN forward pass and return the ROI-pooled
    mask features together with the predicted instances."""
    with torch.no_grad():
        batch = model.preprocess_image(images_list)
        backbone_features = model.backbone(batch.tensor)
        proposals, _ = model.proposal_generator(batch, backbone_features)
        instances, _ = model.roi_heads(batch, backbone_features, proposals)
        levels = [backbone_features[name] for name in model.roi_heads.in_features]
        pooled = model.roi_heads.mask_pooler(levels, [inst.pred_boxes for inst in instances])
    return pooled, instances
# def get_features(images_list, model):
# with torch.no_grad():
# images = model.preprocess_image(images_list) # don't forget to preprocess
# features = model.backbone(images.tensor) # set of cnn features
# proposals, _ = model.proposal_generator(images, features, None) # RPN
#
# features_ = [features[f] for f in model.roi_heads.box_in_features]
# box_features = model.roi_heads.box_pooler(features_, [x.proposal_boxes for x in proposals])
# box_features = model.roi_heads.box_head(box_features) # features of all 1k candidates
# predictions = model.roi_heads.box_predictor(box_features)
# pred_instances, pred_inds = model.roi_heads.box_predictor.inference(predictions, proposals)
# pred_instances = model.roi_heads.forward_with_given_boxes(features, pred_instances)
#
# # output boxes, masks, scores, etc
# pred_instances = model._postprocess(pred_instances, images_list, images.image_sizes) # scale box to orig size
# # features of the proposed boxes
# feats = box_features[pred_inds]
#
# return feats, pred_instances
# def make_features(image_dir, model, target_dir):
# image_list = listdir(image_dir)
#
# with torch.no_grad():
# for img in tqdm(image_list):
# img_path = os.path.join(image_dir, img)
# image = cv2.imread(img_path)
# height, width = image.shape[:2]
# image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
# inputs = [{"image": image, "height": height, "width": width}]
#
# feature, pred_instances = get_features(inputs, model)
#
# # feature_file = open(os.path.join(target_dir, Path(img).stem), 'a')
# # feature_file.write(str(feature.tolist()) + '\n')
# # feature_file.write(str(pred_instances) + '\n')
# # feature_file.close()
# with open(os.path.join(target_dir, Path(img).stem) + '.pkl', 'wb') as f:
# pickle.dump((feature, pred_instances), f)
# f.close()
def make_features(image_dir, model, target_dir):
    """Extract per-instance mask features for every image in ``image_dir``.

    Each detected instance is pickled to ``target_dir`` as
    ``<image-stem>_<i>.pkl`` containing ``(feature_tensor, pred_class)``.
    An index mapping image stem -> [(pickle stem, class), ...] is written to
    data/results/validation_feature_index_class_dict.pkl at the end.
    """
    image_list = listdir(image_dir)
    feature_dict = defaultdict(list)
    with torch.no_grad():
        for img in tqdm(image_list):
            img_path = os.path.join(image_dir, img)
            image = cv2.imread(img_path)
            height, width = image.shape[:2]
            # Detectron2 expects CHW float32 tensors.
            image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
            inputs = [{"image": image, "height": height, "width": width}]
            feature, pred_instances = get_features(inputs, model)
            # enumerate() replaces the manual idx counter of the original.
            for idx, feat in enumerate(feature):
                feat_class = pred_instances[0].get('pred_classes')[idx].to(torch.device("cpu"))
                # ``with`` already closes the file; the explicit close() was redundant.
                with open(os.path.join(target_dir, Path(img).stem) + '_' + str(idx) + '.pkl', 'wb') as f:
                    pickle.dump((feat.to(torch.device("cpu")), feat_class), f)
                feature_dict[Path(img).stem].append((Path(img).stem + '_' + str(idx), feat_class))
    with open(os.path.join('data', 'results', 'validation_feature_index_class_dict.pkl'), 'wb') as f:
        pickle.dump(feature_dict, f)
def main():
    """CLI entry point: load the trained Mask R-CNN checkpoint and dump
    ROI-pooled features for every validation image."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i',
                        '--image_path',
                        help='image dir path',
                        type=str,
                        default=os.path.join('data', 'raw', 'validation', 'image'))
    parser.add_argument('-t',
                        '--target_dir',
                        # BUG FIX: the help text previously duplicated
                        # --image_path's "image dir path".
                        help='output dir path for pooled features',
                        type=str,
                        default=os.path.join('data', 'results', 'pooled_features', 'validation'))
    args = parser.parse_args()
    # Start from the zoo Mask R-CNN R50-FPN config, then override with the
    # locally trained weights (13 classes).
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file('COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))
    cfg.MODEL.WEIGHTS = os.path.join('output', 'model_final.pth')
    cfg.SOLVER.IMS_PER_BATCH = 1
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 13
    cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.7]
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
    cfg.INPUT.MIN_SIZE_TEST = 800
    model = build_model(cfg)
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    # Inference mode: disables training-time behaviour.
    model.eval()
    make_features(args.image_path, model, args.target_dir)
if __name__ == '__main__':
    main()
| en | 0.593272 | # def get_features(images_list, model): # with torch.no_grad(): # images = model.preprocess_image(images_list) # don't forget to preprocess # features = model.backbone(images.tensor) # set of cnn features # proposals, _ = model.proposal_generator(images, features, None) # RPN # # features_ = [features[f] for f in model.roi_heads.box_in_features] # box_features = model.roi_heads.box_pooler(features_, [x.proposal_boxes for x in proposals]) # box_features = model.roi_heads.box_head(box_features) # features of all 1k candidates # predictions = model.roi_heads.box_predictor(box_features) # pred_instances, pred_inds = model.roi_heads.box_predictor.inference(predictions, proposals) # pred_instances = model.roi_heads.forward_with_given_boxes(features, pred_instances) # # # output boxes, masks, scores, etc # pred_instances = model._postprocess(pred_instances, images_list, images.image_sizes) # scale box to orig size # # features of the proposed boxes # feats = box_features[pred_inds] # # return feats, pred_instances # def make_features(image_dir, model, target_dir): # image_list = listdir(image_dir) # # with torch.no_grad(): # for img in tqdm(image_list): # img_path = os.path.join(image_dir, img) # image = cv2.imread(img_path) # height, width = image.shape[:2] # image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) # inputs = [{"image": image, "height": height, "width": width}] # # feature, pred_instances = get_features(inputs, model) # # # feature_file = open(os.path.join(target_dir, Path(img).stem), 'a') # # feature_file.write(str(feature.tolist()) + '\n') # # feature_file.write(str(pred_instances) + '\n') # # feature_file.close() # with open(os.path.join(target_dir, Path(img).stem) + '.pkl', 'wb') as f: # pickle.dump((feature, pred_instances), f) # f.close() | 2.184902 | 2 |
pagetools/src/utils/img_processing.py | ThisTunaCanFly/PAGETools | 4 | 6616620 | from typing import Tuple, Union
import numpy as np
import cv2
def background_calc_dispatch_table(mode: str):
    """Map a background-colour mode name to its computation function.

    Valid modes: "dominant", "mean", "median"; any other mode raises KeyError.
    """
    return {
        "dominant": calc_dominat_color,
        "mean": calc_mean_color,
        "median": calc_median_color,
    }[mode]
def calc_dominat_color(img: np.array) -> Tuple[int]:
    """Return the most frequent colour of ``img`` as a tuple of ints.

    Pixels are encoded into a single integer per colour, counted with
    bincount, and the winner decoded back into channel values.
    """
    pixels = img.reshape(-1, img.shape[-1])
    bins = (256,) * 3
    encoded = np.ravel_multi_index(pixels.T, bins)
    dominant = np.unravel_index(np.bincount(encoded).argmax(), bins)
    return tuple(int(channel) for channel in dominant)
def calc_mean_color(img: np.ndarray) -> np.ndarray:
    """Return the per-channel mean colour of ``img``.

    :param img: H x W x C image array.
    :return: length-C float array of channel means (corrected annotation:
        the previous ``Tuple[int]`` did not match the actual return value).
    """
    return img.mean(axis=0).mean(axis=0)
def calc_median_color(img: np.ndarray) -> np.ndarray:
    """Return the per-channel median colour of ``img``.

    NOTE: this computes the median over axis 0 and then the median of those
    results, which is not in general identical to a single median over all
    pixels -- kept as-is to preserve existing behaviour.

    :param img: H x W x C image array.
    :return: length-C float array.
    """
    return np.median(np.median(img, axis=0), axis=0)
def rotate_img(image: np.ndarray, angle: float, background: Union[int, Tuple[int, int, int]]) -> np.ndarray:
    """Rotate ``image`` by ``angle`` degrees around its centre.

    :param image: input image (H x W [x C]).
    :param angle: rotation in degrees; the sign is inverted before calling
        OpenCV, so positive angles rotate clockwise.
    :param background: fill value for regions exposed by the rotation.
    :return: rotated image with the same dimensions as the input.
    """
    height, width = image.shape[0], image.shape[1]
    center = (width / 2, height / 2)
    transform = cv2.getRotationMatrix2D(center, -angle, 1)
    return cv2.warpAffine(image, transform, (width, height), borderValue=background)
| from typing import Tuple, Union
import numpy as np
import cv2
def background_calc_dispatch_table(mode: str):
    """Map a background-colour mode name to its computation function.

    Valid modes: "dominant", "mean", "median"; any other mode raises KeyError.
    """
    return {
        "dominant": calc_dominat_color,
        "mean": calc_mean_color,
        "median": calc_median_color,
    }[mode]
def calc_dominat_color(img: np.array) -> Tuple[int]:
    """Return the most frequent colour of ``img`` as a tuple of ints.

    Pixels are encoded into a single integer per colour, counted with
    bincount, and the winner decoded back into channel values.
    """
    pixels = img.reshape(-1, img.shape[-1])
    bins = (256,) * 3
    encoded = np.ravel_multi_index(pixels.T, bins)
    dominant = np.unravel_index(np.bincount(encoded).argmax(), bins)
    return tuple(int(channel) for channel in dominant)
def calc_mean_color(img: np.ndarray) -> np.ndarray:
    """Return the per-channel mean colour of ``img``.

    :param img: H x W x C image array.
    :return: length-C float array of channel means (corrected annotation:
        the previous ``Tuple[int]`` did not match the actual return value).
    """
    return img.mean(axis=0).mean(axis=0)
def calc_median_color(img: np.ndarray) -> np.ndarray:
    """Return the per-channel median colour of ``img``.

    NOTE: this computes the median over axis 0 and then the median of those
    results, which is not in general identical to a single median over all
    pixels -- kept as-is to preserve existing behaviour.

    :param img: H x W x C image array.
    :return: length-C float array.
    """
    return np.median(np.median(img, axis=0), axis=0)
def rotate_img(image: np.ndarray, angle: float, background: Union[int, Tuple[int, int, int]]) -> np.ndarray:
    """Rotate ``image`` by ``angle`` degrees around its centre.

    :param image: input image (H x W [x C]).
    :param angle: rotation in degrees; the sign is inverted before calling
        OpenCV, so positive angles rotate clockwise.
    :param background: fill value for regions exposed by the rotation.
    :return: rotated image with the same dimensions as the input.
    """
    height, width = image.shape[0], image.shape[1]
    center = (width / 2, height / 2)
    transform = cv2.getRotationMatrix2D(center, -angle, 1)
    return cv2.warpAffine(image, transform, (width, height), borderValue=background)
| en | 0.367662 | Calculates the dominant color of an image using bincounts :param img: :return: :param img: :return: :param img: :return: :param image: :param angle: :param background: :return: | 2.955871 | 3 |
FUNDAMENTALS_MODULE/Exam_Exercise_Mid/SoftUni_Reception.py | sleepychild/ProgramingBasicsPython | 0 | 6616621 | <gh_stars>0
from typing import List, Tuple, Generator, Callable, Union
from enum import Enum
DEBUG: bool = False
TEST_RUNS: Tuple[Tuple[str]] = (
(
'5',
'6',
'4',
'20',
),
(
'1',
'2',
'3',
'45',
),
(
'3',
'2',
'5',
'40',
),
(
'1',
'1',
'1',
'7',
),
)
def get_run_generator(test_data: Tuple[str]) -> Callable[[], str]:
test_data_gen: Generator[str, None, None] = (line for line in test_data)
def generate_input() -> str:
return next(test_data_gen)
return generate_input
FIRST: int = 0
LAST: int = -1
class Directions(Enum):
UP: Tuple[int] = (-1, 0,)
RIGHT: Tuple[int] = (0, 1,)
DOWN: Tuple[int] = (1, 0,)
LEFT: Tuple[int] = (0, -1,)
class Base:
@classmethod
def FromInput(cls) -> 'Base':
print(input())
return cls()
class ControlClass:
def __init__(self) -> None:
self.r_a: int = int(input())
self.r_b: int = int(input())
self.r_c: int = int(input())
self.sq: int = int(input())
self.tn: int = int()
def tick(self) -> None:
self.tn += 1
if self.tn % 4 != 0:
self.sq -= self.r_a
self.sq -= self.r_b
self.sq -= self.r_c
def run(self) -> None:
while self.sq > 0:
self.tick()
print(f'Time needed: {self.tn}h.')
def solution():
ctrl: ControlClass = ControlClass()
ctrl.run()
if DEBUG:
for test_run in TEST_RUNS:
input: Callable[[], str] = get_run_generator(test_run)
solution()
else:
solution()
| from typing import List, Tuple, Generator, Callable, Union
from enum import Enum
DEBUG: bool = False
TEST_RUNS: Tuple[Tuple[str]] = (
(
'5',
'6',
'4',
'20',
),
(
'1',
'2',
'3',
'45',
),
(
'3',
'2',
'5',
'40',
),
(
'1',
'1',
'1',
'7',
),
)
def get_run_generator(test_data: Tuple[str]) -> Callable[[], str]:
test_data_gen: Generator[str, None, None] = (line for line in test_data)
def generate_input() -> str:
return next(test_data_gen)
return generate_input
FIRST: int = 0
LAST: int = -1
class Directions(Enum):
UP: Tuple[int] = (-1, 0,)
RIGHT: Tuple[int] = (0, 1,)
DOWN: Tuple[int] = (1, 0,)
LEFT: Tuple[int] = (0, -1,)
class Base:
@classmethod
def FromInput(cls) -> 'Base':
print(input())
return cls()
class ControlClass:
def __init__(self) -> None:
self.r_a: int = int(input())
self.r_b: int = int(input())
self.r_c: int = int(input())
self.sq: int = int(input())
self.tn: int = int()
def tick(self) -> None:
self.tn += 1
if self.tn % 4 != 0:
self.sq -= self.r_a
self.sq -= self.r_b
self.sq -= self.r_c
def run(self) -> None:
while self.sq > 0:
self.tick()
print(f'Time needed: {self.tn}h.')
def solution():
ctrl: ControlClass = ControlClass()
ctrl.run()
if DEBUG:
for test_run in TEST_RUNS:
input: Callable[[], str] = get_run_generator(test_run)
solution()
else:
solution() | none | 1 | 3.227157 | 3 | |
spiderBasic/requestsTest/basisUsage/newInstance.py | turoDog/LearningPython | 2 | 6616622 | import requests
response = requests.get('https://www.baidu.com')
print(type(response))
print(response.status_code)
print(type(response.text))
print(response.text)
print(response.cookies)
| import requests
response = requests.get('https://www.baidu.com')
print(type(response))
print(response.status_code)
print(type(response.text))
print(response.text)
print(response.cookies)
| none | 1 | 2.585978 | 3 | |
georssy/test_georssy.py | devsf/georssy | 0 | 6616623 | <reponame>devsf/georssy
"""
tests.Test
---------------
:copyright: (c) 2017 by <NAME>.
:license: Apache2, see LICENSE for more details.
"""
import logging
import unittest
import xml.etree.ElementTree as ET
from api import decode as GeoRssDecoder
class TestGeoRSSy( unittest.TestCase ):
def test_no_parameters( self ):
logging.basicConfig( level = logging.ERROR )
# Check "no parameters" error
self.assertRaises( ValueError, GeoRssDecoder )
def test_no_parent_node( self ):
logging.basicConfig( level = logging.ERROR )
# Check "parent_node = None" error
self.assertRaises( ValueError, GeoRssDecoder, parent_node = None )
def test_no_polygons_over_boxes( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot()
# Check "polygons_over_boxes = False" when not specified
d = GeoRssDecoder( parent_node = parent_node ) # DUMMY parent node
self.assertEqual( d.polygons_over_boxes, False )
class TestSimpleGeoRSS( unittest.TestCase ):
def test_simple_point( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Simple Point detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.point_list )
self.assertNotEqual( d.point_list, [] )
def test_simple_line( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_line.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Simple Line detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.line_list )
self.assertNotEqual( d.line_list, [] )
def test_simple_polygon( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_polygon.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Simple Polygon detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_simple_box( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_box.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Simple Box detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_feature_type( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Feature Type detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.feature_type_list )
self.assertNotEqual( d.feature_type_list, [] )
def test_feature_name( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Feature Name detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.feature_name_list )
self.assertNotEqual( d.feature_name_list, [] )
def test_relationship( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Relationship detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.relationship_list )
self.assertNotEqual( d.relationship_list, [] )
def test_elevation( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Elevation detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.elevation_list )
self.assertNotEqual( d.elevation_list, [] )
def test_floor( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Floor detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.floor_list )
self.assertNotEqual( d.floor_list, [] )
def test_radius( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Radius detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.radius_list )
self.assertNotEqual( d.radius_list, [] )
class TestGmlGeoRSS( unittest.TestCase ):
def test_gml_point( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Point detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.point_list )
self.assertNotEqual( d.point_list, [] )
def test_gml_line_1( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_line_1.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Line detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.line_list )
self.assertNotEqual( d.line_list, [] )
def test_gml_line_2( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_line_2.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Line detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.line_list )
self.assertNotEqual( d.line_list, [] )
def test_gml_line_3( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_line_3.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Line detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.line_list )
self.assertNotEqual( d.line_list, [] )
def test_gml_polygon_1( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_polygon_1.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Polygon detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_gml_polygon_2( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_polygon_2.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Polygon detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_gml_polygon_3( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_polygon_3.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Polygon detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_gml_polygon_4( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_polygon_4.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Polygon detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_gml_polygon_5( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_polygon_5.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Polygon detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_gml_polygon_6( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_polygon_6.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Polygon detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_gml_box( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_box.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Box detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
if __name__ == "__main__":
tc_list = ( TestGeoRSSy, TestSimpleGeoRSS, TestGmlGeoRSS )
ts = unittest.TestSuite()
for tc in tc_list:
ts.addTests( unittest.TestLoader().loadTestsFromTestCase( tc ) )
unittest.TextTestRunner( verbosity = 2 ).run( ts )
| """
tests.Test
---------------
:copyright: (c) 2017 by <NAME>.
:license: Apache2, see LICENSE for more details.
"""
import logging
import unittest
import xml.etree.ElementTree as ET
from api import decode as GeoRssDecoder
class TestGeoRSSy( unittest.TestCase ):
def test_no_parameters( self ):
logging.basicConfig( level = logging.ERROR )
# Check "no parameters" error
self.assertRaises( ValueError, GeoRssDecoder )
def test_no_parent_node( self ):
logging.basicConfig( level = logging.ERROR )
# Check "parent_node = None" error
self.assertRaises( ValueError, GeoRssDecoder, parent_node = None )
def test_no_polygons_over_boxes( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot()
# Check "polygons_over_boxes = False" when not specified
d = GeoRssDecoder( parent_node = parent_node ) # DUMMY parent node
self.assertEqual( d.polygons_over_boxes, False )
class TestSimpleGeoRSS( unittest.TestCase ):
def test_simple_point( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Simple Point detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.point_list )
self.assertNotEqual( d.point_list, [] )
def test_simple_line( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_line.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Simple Line detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.line_list )
self.assertNotEqual( d.line_list, [] )
def test_simple_polygon( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_polygon.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Simple Polygon detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_simple_box( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_box.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Simple Box detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_feature_type( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Feature Type detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.feature_type_list )
self.assertNotEqual( d.feature_type_list, [] )
def test_feature_name( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Feature Name detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.feature_name_list )
self.assertNotEqual( d.feature_name_list, [] )
def test_relationship( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Relationship detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.relationship_list )
self.assertNotEqual( d.relationship_list, [] )
def test_elevation( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Elevation detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.elevation_list )
self.assertNotEqual( d.elevation_list, [] )
def test_floor( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Floor detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.floor_list )
self.assertNotEqual( d.floor_list, [] )
def test_radius( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_simple_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check Radius detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.radius_list )
self.assertNotEqual( d.radius_list, [] )
class TestGmlGeoRSS( unittest.TestCase ):
def test_gml_point( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_point.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Point detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.point_list )
self.assertNotEqual( d.point_list, [] )
def test_gml_line_1( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_line_1.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Line detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.line_list )
self.assertNotEqual( d.line_list, [] )
def test_gml_line_2( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_line_2.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Line detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.line_list )
self.assertNotEqual( d.line_list, [] )
def test_gml_line_3( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_line_3.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Line detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.line_list )
self.assertNotEqual( d.line_list, [] )
def test_gml_polygon_1( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_polygon_1.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Polygon detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_gml_polygon_2( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_polygon_2.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Polygon detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_gml_polygon_3( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_polygon_3.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Polygon detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_gml_polygon_4( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_polygon_4.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Polygon detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_gml_polygon_5( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_polygon_5.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Polygon detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_gml_polygon_6( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_polygon_6.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Polygon detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
def test_gml_box( self ):
logging.basicConfig( level = logging.ERROR )
parent_node = ET.parse( 'georssy/data/tests/test_gml_box.xml' ).getroot().find( '{http://www.w3.org/2005/Atom}entry' )
# Check GML Box detection
d = GeoRssDecoder( parent_node = parent_node )
self.assertIsNotNone( d.polygon_list )
self.assertNotEqual( d.polygon_list, [] )
if __name__ == "__main__":
tc_list = ( TestGeoRSSy, TestSimpleGeoRSS, TestGmlGeoRSS )
ts = unittest.TestSuite()
for tc in tc_list:
ts.addTests( unittest.TestLoader().loadTestsFromTestCase( tc ) )
unittest.TextTestRunner( verbosity = 2 ).run( ts ) | en | 0.530232 | tests.Test --------------- :copyright: (c) 2017 by <NAME>. :license: Apache2, see LICENSE for more details. # Check "no parameters" error # Check "parent_node = None" error # Check "polygons_over_boxes = False" when not specified # DUMMY parent node # Check Simple Point detection # Check Simple Line detection # Check Simple Polygon detection # Check Simple Box detection # Check Feature Type detection # Check Feature Name detection # Check Relationship detection # Check Elevation detection # Check Floor detection # Check Radius detection # Check GML Point detection # Check GML Line detection # Check GML Line detection # Check GML Line detection # Check GML Polygon detection # Check GML Polygon detection # Check GML Polygon detection # Check GML Polygon detection # Check GML Polygon detection # Check GML Polygon detection # Check GML Box detection | 2.335692 | 2 |
interface/run.py | libscARM/cli | 0 | 6616624 | <filename>interface/run.py
import argparse
import logging
import sys
import subprocess
from .version import LIBSCARM_VERSION
from typing import List, Optional
LOG_FILENAME = 'guineapig-tracebacks.log'
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
def parse_args(argv: List[str]):
description = '''
Starts the command line interface of libsc
'''
formatter_class = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description=description,
formatter_class=formatter_class)
parser.add_argument('-v',
'--version',
action='store_true',
help='Print the version of the cli.')
parser.add_argument('-i',
'--init',
action='store_true',
default=False,
help='Set up the test environment.')
parser.add_argument('-l',
'--load',
action='store_true',
default=False,
help='Load the test suite.')
parser.add_argument('--run',
action='store_true',
default=False,
help='Run L1 and L2 cache tests.')
return parser.parse_args(argv)
def main(options: Optional[List[str]]=None):
'''
Launch terminal interface
'''
argv = options if options is not None else sys.argv[1:]
args = parse_args(argv)
if args.version:
print('libsc-arm ', LIBSCARM_VERSION)
if args.init:
subprocess.run(['chmod', 'u+x', 'scripts/init.sh'])
subprocess.run(['./scripts/init.sh'])
if args.load:
subprocess.run(['chmod', 'u+x', 'conn/setup.sh'])
subprocess.run(['./conn/setup.sh'])
if args.run:
print('Running test suite')
if args.run:
print('Generating test report')
if __name__=='__main__':
main()
| <filename>interface/run.py
import argparse
import logging
import sys
import subprocess
from .version import LIBSCARM_VERSION
from typing import List, Optional
LOG_FILENAME = 'guineapig-tracebacks.log'
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
def parse_args(argv: List[str]):
description = '''
Starts the command line interface of libsc
'''
formatter_class = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description=description,
formatter_class=formatter_class)
parser.add_argument('-v',
'--version',
action='store_true',
help='Print the version of the cli.')
parser.add_argument('-i',
'--init',
action='store_true',
default=False,
help='Set up the test environment.')
parser.add_argument('-l',
'--load',
action='store_true',
default=False,
help='Load the test suite.')
parser.add_argument('--run',
action='store_true',
default=False,
help='Run L1 and L2 cache tests.')
return parser.parse_args(argv)
def main(options: Optional[List[str]]=None):
'''
Launch terminal interface
'''
argv = options if options is not None else sys.argv[1:]
args = parse_args(argv)
if args.version:
print('libsc-arm ', LIBSCARM_VERSION)
if args.init:
subprocess.run(['chmod', 'u+x', 'scripts/init.sh'])
subprocess.run(['./scripts/init.sh'])
if args.load:
subprocess.run(['chmod', 'u+x', 'conn/setup.sh'])
subprocess.run(['./conn/setup.sh'])
if args.run:
print('Running test suite')
if args.run:
print('Generating test report')
if __name__=='__main__':
main()
| en | 0.638585 | Starts the command line interface of libsc Launch terminal interface | 2.390159 | 2 |
src/incendium/db.py | thecesrom/incendium | 2 | 6616625 | <gh_stars>1-10
"""Database module."""
__all__ = [
"DisposableConnection",
"InParam",
"OutParam",
"Param",
"check",
"execute_non_query",
"get_data",
"get_output_params",
"get_return_value",
]
import system.db
from com.inductiveautomation.ignition.common import BasicDataset
from java.lang import Thread
class DisposableConnection(object):
    """Disposable Connection.

    A disposable connection enables a database connection in Ignition
    and disables it once the operation is completed to release
    resources.

    Intended for use as a context manager::

        with DisposableConnection("db") as conn:
            ...  # run queries against conn.database
    """

    def __init__(self, database, retries=3):
        """Disposable Connection initializer.

        Args:
            database (str): The name of the database connection in
                Ignition.
            retries (int): The number of additional times to retry
                enabling the connection. Optional.
        """
        # Name of the Ignition datasource this object manages.
        self.database = database
        # Number of 1-second polls performed while waiting for the
        # connection to report "Valid".
        self.retries = retries

    def __enter__(self):
        """Enter the runtime context related to this object.

        Enables the datasource, then polls its status once per second
        (up to ``retries`` times) until it becomes "Valid".

        Raises:
            IOError: If the connection reports "Faulted", or never
                reaches "Valid" within the allotted retries.
        """
        system.db.setDatasourceEnabled(self.database, True)
        for _ in range(self.retries):
            # java.lang.Thread.sleep blocks the current thread for
            # 1000 ms between status polls.
            Thread.sleep(1000)
            if self.status == "Valid":
                break
            if self.status == "Faulted":
                raise IOError(
                    "The database connection {!r} is {}.".format(
                        self.database, self.status
                    )
                )
        else:
            # for/else: loop exhausted all retries without breaking,
            # i.e. the connection never reached "Valid".
            raise IOError(
                "The database connection {!r} could not be enabled.".format(
                    self.database
                )
            )
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Exit the runtime context related to this object.

        Always disables the datasource, whether or not the body of the
        ``with`` block raised.
        """
        system.db.setDatasourceEnabled(self.database, False)

    @property
    def status(self):
        """Get connection status.

        Queries the Gateway for the datasource's current "Status"
        column value (e.g. "Valid", "Faulted").
        """
        connection_info = system.db.getConnectionInfo(self.database)
        return connection_info.getValueAt(0, "Status")
class Param(object):
    """Base class used for defining [In|Out]put parameters.

    Holds the parameter's name (or 1-based positional index), its Type
    Code constant, and an optional value, each exposed through a
    read-only property.
    """

    def __init__(self, name_or_index=None, type_code=None, value=None):
        """Create a parameter descriptor.

        Args:
            name_or_index (object): Parameter name (str) or 1-based
                positional index (int).
            type_code (int): Type code constant from ``system.db``.
            value (object): Value of type ``type_code``. Optional.
        """
        # Stored privately; exposed via the read-only properties below.
        self._name = name_or_index
        self._code = type_code
        self._val = value

    @property
    def name_or_index(self):
        """Return the parameter name or 1-based index."""
        return self._name

    @property
    def type_code(self):
        """Return the parameter's type code constant."""
        return self._code

    @property
    def value(self):
        """Return the parameter's value."""
        return self._val
class InParam(Param):
    """Parameter descriptor for stored-procedure INPUT values."""

    def __init__(self, name_or_index, type_code, value):
        """Create an instance of InParam.

        Args:
            name_or_index (object): Index (int starting at 1, not 0),
                or name (str).
            type_code (int): Type code constant from ``system.db``.
            value (object): Value of type ``type_code``.
        """
        # Delegate storage to the Param base class.
        super(InParam, self).__init__(name_or_index, type_code, value)
class OutParam(Param):
    """Parameter descriptor for stored-procedure OUTPUT values."""

    def __init__(self, name_or_index, type_code):
        """Create an instance of OutParam.

        Args:
            name_or_index (object): Index (int starting at 1, not 0),
                or name (str).
            type_code (int): Type code constant from ``system.db``.
        """
        # OUTPUT parameters carry no input value; Param defaults it to
        # None.
        super(OutParam, self).__init__(name_or_index, type_code)
def _execute_sp(
    stored_procedure,
    database="",
    transaction=None,
    skip_audit=False,
    in_params=None,
    out_params=None,
    get_out_params=False,
    get_result_set=False,
    get_ret_val=False,
    return_type_code=None,
    get_update_count=False,
):
    """Execute a stored procedure against the connection.

    Args:
        stored_procedure (str): The name of the stored procedure to
            execute.
        database (str): The name of the database connection to execute
            against. If omitted or "", the project's default database
            connection will be used. Optional.
        transaction (str): A transaction identifier. If omitted, the
            call will be executed in its own transaction. Optional.
        skip_audit (bool): A flag which, if set to True, will cause the
            procedure call to skip the audit system. Useful for some
            queries that have fields which won't fit into the audit log.
            Optional.
        in_params (list[InParam]): A list containing INPUT parameters.
            Optional.
        out_params (list[OutParam]): A list containing OUTPUT
            parameters. Optional.
        get_out_params (bool): A flag indicating whether or not to
            return OUTPUT parameters after execution. Optional.
        get_result_set (bool): A flag indicating whether or not to
            return a dataset that is the resulting data of the stored
            procedure, if any. Optional.
        get_ret_val (bool): A flag indicating whether or not to return
            the return value of the stored procedure Call. Optional.
        return_type_code (int): The return value Type Code. Optional.
        get_update_count (bool): A flag indicating whether or not to
            return the number of rows modified by the stored
            procedure, or -1 if not applicable. Optional.

    Returns:
        dict: Result dictionary with keys "output_params",
            "result_set", "return_value", and "update_count"; entries
            not requested via the corresponding flag are None.

    Raises:
        TypeError: If in_params or out_params is not a list.
    """
    _out_params = {}
    result = {
        "output_params": None,
        "result_set": None,
        "return_value": None,
        "update_count": None,
    }
    call = system.db.createSProcCall(
        procedureName=stored_procedure,
        database=database,
        tx=transaction,
        skipAudit=skip_audit,
    )
    if in_params is not None:
        if not isinstance(in_params, list):
            raise TypeError("in_params must be of type 'list'.")
        for param in in_params:
            call.registerInParam(
                param.name_or_index, param.type_code, param.value
            )
    if out_params is not None:
        # Bug fix: this branch previously validated ``in_params``
        # (copy-paste error), so supplying out_params with
        # in_params=None raised a spurious TypeError.
        if not isinstance(out_params, list):
            raise TypeError("out_params must be of type 'list'.")
        for param in out_params:
            call.registerOutParam(param.name_or_index, param.type_code)
    if get_ret_val:
        call.registerReturnParam(return_type_code)
    system.db.execSProcCall(call)
    if out_params is not None:
        for param in out_params:
            _out_params[param.name_or_index] = call.getOutParamValue(
                param.name_or_index
            )
    # Populate only the pieces the caller asked for; the rest stay None.
    result["output_params"] = _out_params if get_out_params else None
    result["result_set"] = call.getResultSet() if get_result_set else None
    result["return_value"] = call.getReturnValue() if get_ret_val else None
    result["update_count"] = (
        call.getUpdateCount() if get_update_count else None
    )
    return result
def check(stored_procedure, database="", params=None):
    """Execute a stored procedure against the connection.

    This will return a flag set to TRUE or FALSE.

    Args:
        stored_procedure (str): The name of the stored procedure to
            execute.
        database (str): The name of the database connection to execute
            against. If omitted or "", the project's default database
            connection will be used. Optional.
        params (list[InParam]): A list containing all INPUT parameters.
            Optional.

    Returns:
        bool: The flag.
    """
    # The stored procedure is expected to expose a single BIT OUTPUT
    # parameter named "flag".
    out = get_output_params(
        stored_procedure,
        output=[OutParam("flag", system.db.BIT)],
        database=database,
        params=params,
    )
    return out["flag"]
def execute_non_query(
    stored_procedure, database="", transaction=None, params=None
):
    """Execute a stored procedure against the connection.

    Used for UPDATE, INSERT, and DELETE statements.

    Args:
        stored_procedure (str): The name of the stored procedure to
            execute.
        database (str): The name of the database connection to execute
            against. If omitted or "", the project's default database
            connection will be used. Optional.
        transaction (str): A transaction identifier. If omitted, the
            call will be executed in its own transaction. Optional.
        params (list[InParam]): A list containing all INPUT parameters.
            Optional.

    Returns:
        int: The number of rows modified by the stored procedure, or
            -1 if not applicable.
    """
    # Delegate to the shared helper and pull out the update count.
    return _execute_sp(
        stored_procedure,
        database=database,
        transaction=transaction,
        in_params=params,
        get_update_count=True,
    )["update_count"]
def get_data(stored_procedure, database="", params=None):
    """Get data by executing a stored procedure.

    Args:
        stored_procedure (str): The name of the stored procedure to
            execute.
        database (str): The name of the database connection to execute
            against. If omitted or "", the project's default database
            connection will be used. Optional.
        params (list[InParam]): A Dictionary containing all parameters.
            Optional.

    Returns:
        BasicDataset: A Dataset that is the resulting data of the stored
            procedure call, if any.
    """
    sp_result = _execute_sp(
        stored_procedure,
        database=database,
        in_params=params,
        get_result_set=True,
    )
    return sp_result["result_set"]
def get_output_params(
    stored_procedure, output, database="", transaction=None, params=None
):
    """Get the Output parameters from the Stored Procedure.

    Args:
        stored_procedure (str): The name of the stored procedure to
            execute.
        output (list[OutParam]): A Dictionary containing all output
            parameters.
        database (str): The name of the database connection to execute
            against. If omitted or "", the project's default database
            connection will be used. Optional.
        transaction (str): A transaction identifier. If omitted, the
            call will be executed in its own transaction. Optional.
        params (list[InParam]): A Dictionary containing all parameters.
            Optional.

    Returns:
        dict: Result's output_params.
    """
    return _execute_sp(
        stored_procedure,
        database=database,
        transaction=transaction,
        in_params=params,
        out_params=output,
        get_out_params=True,
    )["output_params"]
def get_return_value(
    stored_procedure,
    return_type_code,
    database="",
    transaction=None,
    params=None,
):
    """Get the Return Value from the Stored Procedure.

    Args:
        stored_procedure (str): The name of the stored procedure to
            execute.
        return_type_code (int): The Type Code of the Return Value.
        database (str): The name of the database connection to execute
            against. If omitted or "", the project's default database
            connection will be used. Optional.
        transaction (str): A transaction identifier. If omitted, the
            call will be executed in its own transaction. Optional.
        params (list[InParam]): A Dictionary containing all parameters.
            Optional.

    Returns:
        int: The return value.
    """
    sp_result = _execute_sp(
        stored_procedure,
        database=database,
        transaction=transaction,
        in_params=params,
        return_type_code=return_type_code,
        get_ret_val=True,
    )
    return sp_result["return_value"]
"""Database module."""
# Public API of this module, listed in alphabetical order.
__all__ = [
    "DisposableConnection",
    "InParam",
    "OutParam",
    "Param",
    "check",
    "execute_non_query",
    "get_data",
    "get_output_params",
    "get_return_value",
]
import system.db
from com.inductiveautomation.ignition.common import BasicDataset
from java.lang import Thread
class DisposableConnection(object):
    """Disposable Connection.

    A disposable connection enables a database connection in Ignition
    and disables it once the operation is completed to release
    resources.
    """

    def __init__(self, database, retries=3):
        """Initialize the DisposableConnection.

        Args:
            database (str): The name of the database connection in
                Ignition.
            retries (int): The number of additional times to retry
                enabling the connection. Optional.
        """
        self.database = database
        self.retries = retries

    @property
    def status(self):
        """Return the current connection status reported by Ignition."""
        connection_info = system.db.getConnectionInfo(self.database)
        return connection_info.getValueAt(0, "Status")

    def __enter__(self):
        """Enable the connection and wait until it becomes usable."""
        system.db.setDatasourceEnabled(self.database, True)
        attempts_left = self.retries
        while attempts_left > 0:
            attempts_left -= 1
            # Give Ignition a moment before polling the status again.
            Thread.sleep(1000)
            if self.status == "Valid":
                return self
            if self.status == "Faulted":
                raise IOError(
                    "The database connection {!r} is {}.".format(
                        self.database, self.status
                    )
                )
        # All retries exhausted without reaching the "Valid" state.
        raise IOError(
            "The database connection {!r} could not be enabled.".format(
                self.database
            )
        )

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Disable the connection when leaving the context."""
        system.db.setDatasourceEnabled(self.database, False)
class Param(object):
    """Base class used for defining [In|Out]put parameters."""

    def __init__(self, name_or_index=None, type_code=None, value=None):
        """Initialize the parameter.

        Args:
            name_or_index (object): Parameter name or index.
            type_code (int): Type code constant.
            value (object): Value of type type_code.
        """
        self._name_or_index = name_or_index
        self._type_code = type_code
        self._value = value

    @property
    def name_or_index(self):
        """Return the parameter name or index."""
        return self._name_or_index

    @property
    def type_code(self):
        """Return the type code constant."""
        return self._type_code

    @property
    def value(self):
        """Return the parameter value."""
        return self._value
class InParam(Param):
    """Class used for declaring INPUT parameters."""

    def __init__(self, name_or_index, type_code, value):
        """Create an instance of InParam.

        Args:
            name_or_index (object): Index (int starting at 1, not 0), or
                name (str).
            type_code (int): Type code constant from `system.db`.
            value (object): Value of type type_code.
        """
        super(InParam, self).__init__(name_or_index, type_code, value)
class OutParam(Param):
    """Class used for declaring OUTPUT parameters."""

    def __init__(self, name_or_index, type_code):
        """Create an instance of OutParam.

        Args:
            name_or_index (object): Index (int starting at 1, not 0), or
                name (str).
            type_code (int): Type code constant from `system.db`.
        """
        # OUTPUT parameters have no initial value; Param defaults it to
        # None.
        super(OutParam, self).__init__(name_or_index, type_code)
def _execute_sp(
    stored_procedure,
    database="",
    transaction=None,
    skip_audit=False,
    in_params=None,
    out_params=None,
    get_out_params=False,
    get_result_set=False,
    get_ret_val=False,
    return_type_code=None,
    get_update_count=False,
):
    """Execute a stored procedure against the connection.

    Args:
        stored_procedure (str): The name of the stored procedure to
            execute.
        database (str): The name of the database connection to execute
            against. If omitted or "", the project's default database
            connection will be used. Optional.
        transaction (str): A transaction identifier. If omitted, the
            call will be executed in its own transaction. Optional.
        skip_audit (bool): A flag which, if set to True, will cause the
            procedure call to skip the audit system. Useful for some
            queries that have fields which won't fit into the audit log.
            Optional.
        in_params (list[InParam]): A Dictionary containing INPUT
            parameters. Optional.
        out_params (list[OutParam]): A Dictionary containing OUTPUT
            parameters. Optional.
        get_out_params (bool): A flag indicating whether or not to
            return OUTPUT parameters after execution. Optional.
        get_result_set (bool): A flag indicating whether or not to
            return a dataset that is the resulting data of the stored
            procedure, if any. Optional.
        get_ret_val (bool): A flag indicating whether or not to return
            the return value of the stored procedure Call. Optional.
        return_type_code (int): The return value Type Code. Optional.
        get_update_count (bool): A flag indicating whether or not to
            return the number of rows modified by the stored
            procedure, or -1 if not applicable. Optional.

    Returns:
        dict: Result dictionary with keys "output_params",
            "result_set", "return_value", and "update_count"; each
            entry is None unless its corresponding get_* flag is True.

    Raises:
        TypeError: If in_params or out_params is not a list.
    """
    _out_params = {}
    result = {
        "output_params": None,
        "result_set": None,
        "return_value": None,
        "update_count": None,
    }
    call = system.db.createSProcCall(
        procedureName=stored_procedure,
        database=database,
        tx=transaction,
        skipAudit=skip_audit,
    )
    if in_params is not None:
        if not isinstance(in_params, list):
            raise TypeError("in_params must be of type 'list'.")
        for param in in_params:
            call.registerInParam(
                param.name_or_index, param.type_code, param.value
            )
    if out_params is not None:
        # BUG FIX: this branch previously validated `in_params` instead
        # of `out_params`, so a non-list out_params was never rejected.
        if not isinstance(out_params, list):
            raise TypeError("out_params must be of type 'list'.")
        for param in out_params:
            call.registerOutParam(param.name_or_index, param.type_code)
    if get_ret_val:
        call.registerReturnParam(return_type_code)
    system.db.execSProcCall(call)
    if out_params is not None:
        for param in out_params:
            _out_params[param.name_or_index] = call.getOutParamValue(
                param.name_or_index
            )
    result["output_params"] = _out_params if get_out_params else None
    result["result_set"] = call.getResultSet() if get_result_set else None
    result["return_value"] = call.getReturnValue() if get_ret_val else None
    result["update_count"] = (
        call.getUpdateCount() if get_update_count else None
    )
    return result
def check(stored_procedure, database="", params=None):
    """Execute a stored procedure and return its boolean flag.

    The stored procedure is expected to expose an output parameter
    named "flag" of type BIT.

    Args:
        stored_procedure (str): The name of the stored procedure to
            execute.
        database (str): The name of the database connection to execute
            against. If omitted or "", the project's default database
            connection will be used. Optional.
        params (list[InParam]): A Dictionary containing all parameters.
            Optional.

    Returns:
        bool: The flag.
    """
    flag_param = OutParam("flag", system.db.BIT)
    out_values = get_output_params(
        stored_procedure,
        output=[flag_param],
        database=database,
        params=params,
    )
    return out_values["flag"]
def execute_non_query(
    stored_procedure, database="", transaction=None, params=None
):
    """Execute a stored procedure against the connection.

    Used for UPDATE, INSERT, and DELETE statements.

    Args:
        stored_procedure (str): The name of the stored procedure to
            execute.
        database (str): The name of the database connection to execute
            against. If omitted or "", the project's default database
            connection will be used. Optional.
        transaction (str): A transaction identifier. If omitted, the
            call will be executed in its own transaction. Optional.
        params (list[InParam]): A Dictionary containing all parameters.
            Optional.

    Returns:
        int: The number of rows modified by the stored procedure, or
            -1 if not applicable.
    """
    return _execute_sp(
        stored_procedure,
        database=database,
        transaction=transaction,
        in_params=params,
        get_update_count=True,
    )["update_count"]
def get_data(stored_procedure, database="", params=None):
    """Get data by executing a stored procedure.

    Args:
        stored_procedure (str): The name of the stored procedure to
            execute.
        database (str): The name of the database connection to execute
            against. If omitted or "", the project's default database
            connection will be used. Optional.
        params (list[InParam]): A Dictionary containing all parameters.
            Optional.

    Returns:
        BasicDataset: A Dataset that is the resulting data of the stored
            procedure call, if any.
    """
    sp_result = _execute_sp(
        stored_procedure,
        database=database,
        in_params=params,
        get_result_set=True,
    )
    return sp_result["result_set"]
def get_output_params(
    stored_procedure, output, database="", transaction=None, params=None
):
    """Get the Output parameters from the Stored Procedure.

    Args:
        stored_procedure (str): The name of the stored procedure to
            execute.
        output (list[OutParam]): A Dictionary containing all output
            parameters.
        database (str): The name of the database connection to execute
            against. If omitted or "", the project's default database
            connection will be used. Optional.
        transaction (str): A transaction identifier. If omitted, the
            call will be executed in its own transaction. Optional.
        params (list[InParam]): A Dictionary containing all parameters.
            Optional.

    Returns:
        dict: Result's output_params.
    """
    return _execute_sp(
        stored_procedure,
        database=database,
        transaction=transaction,
        in_params=params,
        out_params=output,
        get_out_params=True,
    )["output_params"]
def get_return_value(
    stored_procedure,
    return_type_code,
    database="",
    transaction=None,
    params=None,
):
    """Get the Return Value from the Stored Procedure.

    Args:
        stored_procedure (str): The name of the stored procedure to
            execute.
        return_type_code (int): The Type Code of the Return Value.
        database (str): The name of the database connection to execute
            against. If omitted or "", the project's default database
            connection will be used. Optional.
        transaction (str): A transaction identifier. If omitted, the
            call will be executed in its own transaction. Optional.
        params (list[InParam]): A Dictionary containing all parameters.
            Optional.

    Returns:
        int: The return value.
    """
    sp_result = _execute_sp(
        stored_procedure,
        database=database,
        transaction=transaction,
        in_params=params,
        return_type_code=return_type_code,
        get_ret_val=True,
    )
    return sp_result["return_value"]
out_params (list[OutParam]): A Dictionary containing OUTPUT parameters. Optional. get_out_params (bool): A flag indicating whether or not to return OUTPUT parameters after execution. Optional. get_result_set (bool): A flag indicating whether or not to return a dataset that is the resulting data of the stored procedure, if any. Optional. get_ret_val (bool): A flag indicating whether or not to return the return value of the stored procedure Call. Optional. return_type_code (int): The return value Type Code. Optional. get_update_count (bool): A flag indicating whether or not to return the number of rows modified by the stored procedure, or -1 if not applicable. Optional. Returns: dict: Result dictionary. Execute a stored procedure against the connection. This will return a flag set to TRUE or FALSE. Args: stored_procedure (str): The name of the stored procedure to execute. database (str): The name of the database connection to execute against. If omitted or "", the project's default database connection will be used. Optional. params (list[InParam]): A Dictionary containing all parameters. Optional. Returns: bool: The flag. Execute a stored procedure against the connection. Used for UPDATE, INSERT, and DELETE statements. Args: stored_procedure (str): The name of the stored procedure to execute. database (str): The name of the database connection to execute against. If omitted or "", the project's default database connection will be used. Optional. transaction (str): A transaction identifier. If omitted, the call will be executed in its own transaction. Optional. params (list[InParam]): A Dictionary containing all parameters. Optional. Returns: int: The number of rows modified by the stored procedure, or -1 if not applicable. Get data by executing a stored procedure. Args: stored_procedure (str): The name of the stored procedure to execute. database (str): The name of the database connection to execute against. 
If omitted or "", the project's default database connection will be used. Optional. params (list[InParam]): A Dictionary containing all parameters. Optional. Returns: BasicDataset: A Dataset that is the resulting data of the stored procedure call, if any. Get the Output parameters from the Stored Procedure. Args: stored_procedure (str): The name of the stored procedure to execute. output (list[OutParam]): A Dictionary containing all output parameters. database (str): The name of the database connection to execute against. If omitted or "", the project's default database connection will be used. Optional. transaction (str): A transaction identifier. If omitted, the call will be executed in its own transaction. Optional. params (list[InParam]): A Dictionary containing all parameters. Optional. Returns: dict: Result's output_params. Get the Return Value from the Stored Procedure. Args: stored_procedure (str): The name of the stored procedure to execute. return_type_code (int): The Type Code of the Return Value. database (str): The name of the database connection to execute against. If omitted or "", the project's default database connection will be used. Optional. transaction (str): A transaction identifier. If omitted, the call will be executed in its own transaction. Optional. params (list[InParam]): A Dictionary containing all parameters. Optional. Returns: int: The return value. | 2.55083 | 3 |
solumclient/tests/common/test_github.py | openstack/python-solumclient | 14 | 6616626 | # Copyright (c) 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from unittest import mock
from solumclient.common import github
from solumclient.tests import base
class TestGitHubAuth(base.TestCase):
fake_repo = "http://github.com/fakeuser/fakerepo.git"
fake_trigger = "http://example.com/trigger/1"
fake_username = 'fakeuser'
fake_password = '<PASSWORD>'
fake_token = 'f<PASSWORD>ken'
def test_invalid_repo(self):
self.assertRaises(ValueError,
github.GitHubAuth,
"http://example.com")
def test_auth_header_username_password(self):
gha = github.GitHubAuth(self.fake_repo,
username=self.fake_username,
password=self.fake_password)
# base64.b64encode('fakeuser:fakepassword') yields 'ZmFrZX...'
expected_auth_header = {
'Content-Type': 'application/json',
'Authorization': 'Basic ZmFrZXVzZXI6ZmFrZXBhc3N3b3Jk',
}
self.assertEqual(expected_auth_header, gha.auth_header)
@mock.patch('getpass.getpass')
def test_auth_header_username_password_2fa(self, fake_getpass):
gha = github.GitHubAuth(self.fake_repo,
username=self.fake_username,
password=self.fake_password)
gha._otp_required = True
fake_getpass.return_value = 'fakeonetime'
expected_auth_header = {
'Content-Type': 'application/json',
'Authorization': 'Basic ZmFrZXVzZXI6ZmFrZXBhc3N3b3Jk',
'x-github-otp': 'fakeonetime',
}
self.assertEqual(expected_auth_header, gha.auth_header)
def test_auth_header_repo_token(self):
gha = github.GitHubAuth(self.fake_repo,
repo_token=self.fake_token)
expected_auth_header = {
'Content-Type': 'application/json',
'Authorization': 'token %s' % self.fake_token,
}
self.assertEqual(expected_auth_header, gha.auth_header)
@mock.patch('httplib2.Http.request')
def test_create_webhook(self, fake_request):
gha = github.GitHubAuth(self.fake_repo,
repo_token=self.fake_token)
fake_request.return_value = ({'status': '200'},
'{"token": "%s"}' % self.fake_token)
gha.create_repo_token = mock.MagicMock()
gha.create_repo_token.return_value = 'token<PASSWORD>'
gha.create_webhook(self.fake_trigger)
fake_request.assert_called_once_with(
'https://api.github.com/repos/fakeuser/fakerepo/hooks',
'POST',
headers=mock.ANY,
body=mock.ANY)
expected_body = {
"config": {
"url": self.fake_trigger,
"content_type": "json"},
"name": "web",
"events": ["pull_request", "commit_comment"]}
actual_body = json.loads(fake_request.call_args[1]['body'])
self.assertEqual(expected_body, actual_body)
@mock.patch('httplib2.Http.request')
def test_create_webhook_unittest_only(self, fake_request):
gha = github.GitHubAuth(self.fake_repo,
username=self.fake_username,
password=self.fake_password)
fake_request.return_value = ({'status': '200'},
'{"token": "foo"}')
gha.create_repo_token = mock.MagicMock()
gha.create_repo_token.return_value = 'token123'
gha.create_webhook(self.fake_trigger, workflow=['unittest'])
fake_request.assert_called_once_with(
'https://api.github.com/repos/fakeuser/fakerepo/hooks',
'POST',
headers=mock.ANY,
body=mock.ANY)
expected_body = {
"config": {
"url": self.fake_trigger + "?workflow=unittest",
"content_type": "json"},
"name": "web",
"events": ["pull_request", "commit_comment"]}
actual_body = json.loads(fake_request.call_args[1]['body'])
self.assertEqual(expected_body, actual_body)
@mock.patch('httplib2.Http.request')
def test_create_webhook_unittest_build(self, fake_request):
gha = github.GitHubAuth(self.fake_repo,
username=self.fake_username,
password=<PASSWORD>)
fake_request.return_value = ({'status': '200'},
'{"token": "foo"}')
gha.create_repo_token = mock.MagicMock()
gha.create_repo_token.return_value = 'token123'
gha.create_webhook(self.fake_trigger, workflow=['unittest', 'build'])
fake_request.assert_called_once_with(
'https://api.github.com/repos/fakeuser/fakerepo/hooks',
'POST',
headers=mock.ANY,
body=mock.ANY)
expected_body = {
"config": {
"url": self.fake_trigger + "?workflow=unittest+build",
"content_type": "json"},
"name": "web",
"events": ["pull_request", "commit_comment"]}
actual_body = json.loads(fake_request.call_args[1]['body'])
self.assertEqual(expected_body, actual_body)
@mock.patch('httplib2.Http.request')
def test_add_ssh_key(self, fake_request):
gha = github.GitHubAuth(self.fake_repo,
username=self.fake_username,
password=self.fake_password)
fake_request.return_value = ({'status': '200'},
'{"token": "foo"}')
fake_pub_key = 'foo'
gha.add_ssh_key(public_key=fake_pub_key)
fake_request.assert_called_once_with(
'https://api.github.com/user/keys',
'POST',
headers=mock.ANY,
body=mock.ANY)
expected_body = {"key": "foo", "title": "devops@Solum"}
actual_body = json.loads(fake_request.call_args[1]['body'])
self.assertEqual(expected_body, actual_body)
| # Copyright (c) 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from unittest import mock
from solumclient.common import github
from solumclient.tests import base
class TestGitHubAuth(base.TestCase):
fake_repo = "http://github.com/fakeuser/fakerepo.git"
fake_trigger = "http://example.com/trigger/1"
fake_username = 'fakeuser'
fake_password = '<PASSWORD>'
fake_token = 'f<PASSWORD>ken'
def test_invalid_repo(self):
self.assertRaises(ValueError,
github.GitHubAuth,
"http://example.com")
def test_auth_header_username_password(self):
gha = github.GitHubAuth(self.fake_repo,
username=self.fake_username,
password=self.fake_password)
# base64.b64encode('fakeuser:fakepassword') yields 'ZmFrZX...'
expected_auth_header = {
'Content-Type': 'application/json',
'Authorization': 'Basic ZmFrZXVzZXI6ZmFrZXBhc3N3b3Jk',
}
self.assertEqual(expected_auth_header, gha.auth_header)
@mock.patch('getpass.getpass')
def test_auth_header_username_password_2fa(self, fake_getpass):
gha = github.GitHubAuth(self.fake_repo,
username=self.fake_username,
password=self.fake_password)
gha._otp_required = True
fake_getpass.return_value = 'fakeonetime'
expected_auth_header = {
'Content-Type': 'application/json',
'Authorization': 'Basic ZmFrZXVzZXI6ZmFrZXBhc3N3b3Jk',
'x-github-otp': 'fakeonetime',
}
self.assertEqual(expected_auth_header, gha.auth_header)
def test_auth_header_repo_token(self):
gha = github.GitHubAuth(self.fake_repo,
repo_token=self.fake_token)
expected_auth_header = {
'Content-Type': 'application/json',
'Authorization': 'token %s' % self.fake_token,
}
self.assertEqual(expected_auth_header, gha.auth_header)
@mock.patch('httplib2.Http.request')
def test_create_webhook(self, fake_request):
gha = github.GitHubAuth(self.fake_repo,
repo_token=self.fake_token)
fake_request.return_value = ({'status': '200'},
'{"token": "%s"}' % self.fake_token)
gha.create_repo_token = mock.MagicMock()
gha.create_repo_token.return_value = 'token<PASSWORD>'
gha.create_webhook(self.fake_trigger)
fake_request.assert_called_once_with(
'https://api.github.com/repos/fakeuser/fakerepo/hooks',
'POST',
headers=mock.ANY,
body=mock.ANY)
expected_body = {
"config": {
"url": self.fake_trigger,
"content_type": "json"},
"name": "web",
"events": ["pull_request", "commit_comment"]}
actual_body = json.loads(fake_request.call_args[1]['body'])
self.assertEqual(expected_body, actual_body)
@mock.patch('httplib2.Http.request')
def test_create_webhook_unittest_only(self, fake_request):
gha = github.GitHubAuth(self.fake_repo,
username=self.fake_username,
password=self.fake_password)
fake_request.return_value = ({'status': '200'},
'{"token": "foo"}')
gha.create_repo_token = mock.MagicMock()
gha.create_repo_token.return_value = 'token123'
gha.create_webhook(self.fake_trigger, workflow=['unittest'])
fake_request.assert_called_once_with(
'https://api.github.com/repos/fakeuser/fakerepo/hooks',
'POST',
headers=mock.ANY,
body=mock.ANY)
expected_body = {
"config": {
"url": self.fake_trigger + "?workflow=unittest",
"content_type": "json"},
"name": "web",
"events": ["pull_request", "commit_comment"]}
actual_body = json.loads(fake_request.call_args[1]['body'])
self.assertEqual(expected_body, actual_body)
@mock.patch('httplib2.Http.request')
def test_create_webhook_unittest_build(self, fake_request):
gha = github.GitHubAuth(self.fake_repo,
username=self.fake_username,
password=<PASSWORD>)
fake_request.return_value = ({'status': '200'},
'{"token": "foo"}')
gha.create_repo_token = mock.MagicMock()
gha.create_repo_token.return_value = 'token123'
gha.create_webhook(self.fake_trigger, workflow=['unittest', 'build'])
fake_request.assert_called_once_with(
'https://api.github.com/repos/fakeuser/fakerepo/hooks',
'POST',
headers=mock.ANY,
body=mock.ANY)
expected_body = {
"config": {
"url": self.fake_trigger + "?workflow=unittest+build",
"content_type": "json"},
"name": "web",
"events": ["pull_request", "commit_comment"]}
actual_body = json.loads(fake_request.call_args[1]['body'])
self.assertEqual(expected_body, actual_body)
@mock.patch('httplib2.Http.request')
def test_add_ssh_key(self, fake_request):
gha = github.GitHubAuth(self.fake_repo,
username=self.fake_username,
password=self.fake_password)
fake_request.return_value = ({'status': '200'},
'{"token": "foo"}')
fake_pub_key = 'foo'
gha.add_ssh_key(public_key=fake_pub_key)
fake_request.assert_called_once_with(
'https://api.github.com/user/keys',
'POST',
headers=mock.ANY,
body=mock.ANY)
expected_body = {"key": "foo", "title": "devops@Solum"}
actual_body = json.loads(fake_request.call_args[1]['body'])
self.assertEqual(expected_body, actual_body)
| en | 0.8255 | # Copyright (c) 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # base64.b64encode('fakeuser:fakepassword') yields 'ZmFrZX...' | 2.046409 | 2 |
musicscore/musicxml/attributes/grace_attributes.py | alexgorji/music_score | 2 | 6616627 | <reponame>alexgorji/music_score
from musicscore.musicxml.attributes.attribute_abstract import AttributeAbstract
class StealTimePrevious(AttributeAbstract):
def __init__(self, steal_time_previous=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generate_attribute('steal-time-previous', steal_time_previous, 'Percent')
class StealTimeFollowing(AttributeAbstract):
def __init__(self, steal_time_following=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generate_attribute('steal-time-following', steal_time_following, 'TypePercent')
class MakeTime(AttributeAbstract):
def __init__(self, make_time=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generate_attribute('make-time', make_time, 'TypeDivisions')
class Slash(AttributeAbstract):
def __init__(self, slash=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generate_attribute('slash', slash, 'TypeYesNo')
| from musicscore.musicxml.attributes.attribute_abstract import AttributeAbstract
class StealTimePrevious(AttributeAbstract):
def __init__(self, steal_time_previous=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generate_attribute('steal-time-previous', steal_time_previous, 'Percent')
class StealTimeFollowing(AttributeAbstract):
def __init__(self, steal_time_following=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generate_attribute('steal-time-following', steal_time_following, 'TypePercent')
class MakeTime(AttributeAbstract):
def __init__(self, make_time=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generate_attribute('make-time', make_time, 'TypeDivisions')
class Slash(AttributeAbstract):
def __init__(self, slash=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generate_attribute('slash', slash, 'TypeYesNo') | none | 1 | 2.792487 | 3 | |
timescaledb/smudge_timestamps.py | cmu-db/noisepage-stats | 23 | 6616628 | <reponame>cmu-db/noisepage-stats
import psycopg2
import argparse
from datetime import timedelta
from random import randrange
def fetch_all_time(conn, table):
query = f"""
SELECT
time,
id
FROM {table}
ORDER BY time ASC
"""
with conn.cursor() as cur:
cur.execute(query)
return cur.fetchall()
def find_times_with_duplicates(conn, table):
query = f"""
SELECT
time,
COUNT(*)
FROM {table}
GROUP BY time
HAVING COUNT(*) > 1
"""
with conn.cursor() as cur:
cur.execute(query)
return cur.fetchall()
def find_records_with_duplicate_times(conn, table, time):
query = f"""
SELECT
time,
id
FROM {table}
WHERE
time = '{time}'
"""
with conn.cursor() as cur:
cur.execute(query)
return cur.fetchall()
def update_with_smudge(conn, table, record):
old_time = record[0]
new_time = old_time + timedelta(milliseconds=randrange(10))
sql_statement = f"""
UPDATE {table}
SET time = '{new_time}'
WHERE
id = '{record[1]}'
"""
with conn.cursor() as cur:
cur.execute(sql_statement)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--username', type=str, help='Database username')
parser.add_argument('--password', type=str, help='Datatbase password')
parser.add_argument('--host', type=str, default='incrudibles-production.db.pdl.cmu.edu', help='Hostname of the database (i.e. incrudibles-production.db.pdl.cmu.edu')
parser.add_argument('--port', type=str, default='32003', help='Port that the DB is running on.')
args = parser.parse_args()
username = args.username
password = <PASSWORD>.password
host = args.host
port = args.port
conn = psycopg2.connect(f'postgres://{username}:{password}@{host}:{port}/pss_database')
table = 'oltpbench_results'
all_table_records = find_times_with_duplicates(conn, table)
count = 1
for time, _ in all_table_records:
print(count)
count +=1
duplicate_time_records = find_records_with_duplicate_times(conn, table, time)
for record in duplicate_time_records:
update_with_smudge(conn, table, record)
conn.commit()
if __name__ == "__main__":
main() | import psycopg2
import argparse
from datetime import timedelta
from random import randrange
def fetch_all_time(conn, table):
query = f"""
SELECT
time,
id
FROM {table}
ORDER BY time ASC
"""
with conn.cursor() as cur:
cur.execute(query)
return cur.fetchall()
def find_times_with_duplicates(conn, table):
query = f"""
SELECT
time,
COUNT(*)
FROM {table}
GROUP BY time
HAVING COUNT(*) > 1
"""
with conn.cursor() as cur:
cur.execute(query)
return cur.fetchall()
def find_records_with_duplicate_times(conn, table, time):
query = f"""
SELECT
time,
id
FROM {table}
WHERE
time = '{time}'
"""
with conn.cursor() as cur:
cur.execute(query)
return cur.fetchall()
def update_with_smudge(conn, table, record):
old_time = record[0]
new_time = old_time + timedelta(milliseconds=randrange(10))
sql_statement = f"""
UPDATE {table}
SET time = '{new_time}'
WHERE
id = '{record[1]}'
"""
with conn.cursor() as cur:
cur.execute(sql_statement)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--username', type=str, help='Database username')
parser.add_argument('--password', type=str, help='Datatbase password')
parser.add_argument('--host', type=str, default='incrudibles-production.db.pdl.cmu.edu', help='Hostname of the database (i.e. incrudibles-production.db.pdl.cmu.edu')
parser.add_argument('--port', type=str, default='32003', help='Port that the DB is running on.')
args = parser.parse_args()
username = args.username
password = <PASSWORD>.password
host = args.host
port = args.port
conn = psycopg2.connect(f'postgres://{username}:{password}@{host}:{port}/pss_database')
table = 'oltpbench_results'
all_table_records = find_times_with_duplicates(conn, table)
count = 1
for time, _ in all_table_records:
print(count)
count +=1
duplicate_time_records = find_records_with_duplicate_times(conn, table, time)
for record in duplicate_time_records:
update_with_smudge(conn, table, record)
conn.commit()
if __name__ == "__main__":
main() | en | 0.461549 | SELECT time, id FROM {table} ORDER BY time ASC SELECT time, COUNT(*) FROM {table} GROUP BY time HAVING COUNT(*) > 1 SELECT time, id FROM {table} WHERE time = '{time}' UPDATE {table} SET time = '{new_time}' WHERE id = '{record[1]}' | 2.844211 | 3 |
nas/wrappers/storage.py | SilverDragon135/Neo-Alias | 2 | 6616629 | <reponame>SilverDragon135/Neo-Alias
from boa.blockchain.vm.Neo.Storage import GetContext, Get, Put, Delete
from boa.code.builtins import concat, range
class Storage:
    """
    Simplifies access to storage apis

    NOTE(review): this is neo-boa smart-contract Python, not standard
    CPython — ``list(length=...)`` is a boa builtin, and one-byte slices
    of ``data`` are used directly as integers (NEO VM semantics).
    """
    # Storage context handle shared by every operation of this contract.
    ctx = GetContext()
    def load(self, key):
        """
        :param key:
        :returns value:
        """
        return Get(self.ctx, key)
    def save(self, key, value):
        """
        :param key:
        :param value:
        """
        Put(self.ctx, key, value)
    def delete(self, key):
        """
        :param key:
        """
        Delete(self.ctx, key)
    def deserialize_bytearray(self, data):
        """
        :param data:
        \n:returns deserialized data as array:
        \nDeserializes bytearray to array

        Wire layout (inverse of serialize_array):
        [len-of-count][count][item0-len-of-len][item0-len][item0-bytes]...
        """
        # get length of length
        collection_length_length = data[0:1]
        # get length of collection
        collection_len = data[1:collection_length_length + 1]
        # create a new collection (boa builtin: pre-sized list)
        new_collection = list(length=collection_len)
        # trim the length data
        offset = 1 + collection_length_length
        for i in range(0, collection_len):
            # get the data length length
            itemlen_len = data[offset:offset + 1]
            # get the length of the data
            item_len = data[offset + 1:offset + 1 + itemlen_len]
            # get the data
            item = data[offset + 1 + itemlen_len: offset + 1 + itemlen_len + item_len]
            # store it in collection
            new_collection[i] = item
            # advance past the length-prefix byte, the length field and the payload
            offset = offset + item_len + itemlen_len + 1
        return new_collection
    def serialize_array(self, items):
        """
        :param items:
        \n:returns serialized items as bytearray:
        \nSerializes array to bytearray
        """
        # serialize the length of the list
        itemlength = self.serialize_var_length_item(items)
        output = itemlength
        # now go through and append all your stuff
        for item in items:
            # get the variable length of the item
            # to be serialized
            itemlen = self.serialize_var_length_item(item)
            # add that indicator
            output = concat(output, itemlen)
            # now add the item
            output = concat(output, item)
        # return the stuff
        return output
    def serialize_var_length_item(self, item):
        """
        :param item:
        \n:returns serialized var lenth item:
        \nserializes length of item

        Emits [width-marker][length]: the marker says how many bytes the
        length itself occupies (1, 2 or 4).
        """
        # get the length of your stuff
        stuff_len = len(item)
        # now we need to know how many bytes the length of the array
        # will take to store
        # this is one byte
        if stuff_len <= 255:
            byte_len = b'\x01'
        # two byte
        elif stuff_len <= 65535:
            byte_len = b'\x02'
        # hopefully 4 byte
        else:
            byte_len = b'\x04'
        out = concat(byte_len, stuff_len)
        return out
| from boa.blockchain.vm.Neo.Storage import GetContext, Get, Put, Delete
from boa.code.builtins import concat, range
class Storage:
"""
Simplifies access to storage apis
"""
ctx = GetContext()
def load(self, key):
"""
:param key:
:returns value:
"""
return Get(self.ctx, key)
def save(self, key, value):
"""
:param key:
:param value:
"""
Put(self.ctx, key, value)
def delete(self, key):
"""
:param key:
"""
Delete(self.ctx, key)
def deserialize_bytearray(self, data):
"""
:param data:
\n:returns deserialized data as array:
\nDeserializes bytearray to array
"""
# get length of length
collection_length_length = data[0:1]
# get length of collection
collection_len = data[1:collection_length_length + 1]
# create a new collection
new_collection = list(length=collection_len)
# trim the length data
offset = 1 + collection_length_length
for i in range(0, collection_len):
# get the data length length
itemlen_len = data[offset:offset + 1]
# get the length of the data
item_len = data[offset + 1:offset + 1 + itemlen_len]
# get the data
item = data[offset + 1 + itemlen_len: offset + 1 + itemlen_len + item_len]
# store it in collection
new_collection[i] = item
offset = offset + item_len + itemlen_len + 1
return new_collection
def serialize_array(self, items):
"""
:param items:
\n:returns serialized items as bytearray:
\nSerializes array to bytearray
"""
# serialize the length of the list
itemlength = self.serialize_var_length_item(items)
output = itemlength
# now go through and append all your stuff
for item in items:
# get the variable length of the item
# to be serialized
itemlen = self.serialize_var_length_item(item)
# add that indicator
output = concat(output, itemlen)
# now add the item
output = concat(output, item)
# return the stuff
return output
def serialize_var_length_item(self, item):
"""
:param item:
\n:returns serialized var lenth item:
\nserializes length of item
"""
# get the length of your stuff
stuff_len = len(item)
# now we need to know how many bytes the length of the array
# will take to store
# this is one byte
if stuff_len <= 255:
byte_len = b'\x01'
# two byte
elif stuff_len <= 65535:
byte_len = b'\x02'
# hopefully 4 byte
else:
byte_len = b'\x04'
out = concat(byte_len, stuff_len)
return out | en | 0.619829 | Simplifies access to storage apis :param key: :returns value: :param key: :param value: :param key: :param data: \n:returns deserialized data as array: \nDeserializes bytearray to array # get length of length # get length of collection # create a new collection # trim the length data # get the data length length # get the length of the data # get the data # store it in collection :param items: \n:returns serialized items as bytearray: \nSerializes array to bytearray # serialize the length of the list # now go through and append all your stuff # get the variable length of the item # to be serialized # add that indicator # now add the item # return the stuff :param item: \n:returns serialized var lenth item: \nserializes length of item # get the length of your stuff # now we need to know how many bytes the length of the array # will take to store # this is one byte # two byte # hopefully 4 byte | 2.482915 | 2 |
IM_calculation/source_site_dist/src_site_dist.py | ucgmsim/IM_calculation | 0 | 6616630 | <filename>IM_calculation/source_site_dist/src_site_dist.py
from typing import List, Dict
import matplotlib.path as mpltPath
import numba
import numpy as np
from qcore.geo import get_distances, ll_cross_along_track_dist, ll_bearing
numba.config.THREADING_LAYER = "omp"
h_dist_f = numba.njit(get_distances)
VOLCANIC_FRONT_COORDS = [(175.508, -39.364), (177.199, -37.73)]
VOLCANIC_FRONT_LINE = mpltPath.Path(VOLCANIC_FRONT_COORDS)
@numba.jit(parallel=True)
def calc_rrup_rjb(srf_points: np.ndarray, locations: np.ndarray):
    """Calculates rrup and rjb distance

    rrup is the minimum 3D distance from a site to any rupture-surface
    point; rjb (Joyner-Boore) is the minimum horizontal distance.

    Parameters
    ----------
    srf_points: np.ndarray
        The fault points from the srf file (qcore, srf.py, read_srf_points),
        format (lon, lat, depth)
    locations: np.ndarray
        The locations for which to calculate the distances,
        format (lon, lat, depth)

    Returns
    -------
    rrups : np.ndarray
        The rrup distance for the locations, shape/order same as locations
    rjb : np.ndarray
        The rjb distance for the locations, shape/order same as locations
    """
    rrups = np.empty(locations.shape[0])
    rjb = np.empty(locations.shape[0])
    # prange: each site is independent, so numba parallelises this loop.
    for loc_ix in numba.prange(locations.shape[0]):
        # horizontal distances from this site to every fault point
        h_dist = h_dist_f(srf_points, locations[loc_ix, 0], locations[loc_ix, 1])
        # vertical (depth) separation to every fault point
        v_dist = srf_points[:, 2] - locations[loc_ix, 2]
        # combine into full 3D distance
        d = np.sqrt(h_dist ** 2 + v_dist ** 2)
        rrups[loc_ix] = np.min(d)
        rjb[loc_ix] = np.min(h_dist)
    return rrups, rjb
def calc_rx_ry(
    srf_points: np.ndarray,
    plane_infos: List[Dict],
    locations: np.ndarray,
    hypocentre_origin=False,
    type=2,
):
    """
    Dispatch Rx/Ry calculation to the requested GC formulation.

    :param srf_points: An array with shape (n, 3) giving the lon, lat, depth location of each subfault
    :param plane_infos: A list of srf header dictionaries, as retrieved from qcore.srf.get_headers with idx=True
    :param locations: An array with shape (m, 2) giving the lon, lat locations of each location to get Rx, Ry values for
    :param type: Selects the GC formulation (1 or 2)
    :param hypocentre_origin: If True sets the Ry origin/0 point to the fault trace projection of the hypocentre;
        otherwise the most upstrike subfault of the first fault trace is used. Only used for GC2.
    :return: An array with shape (m, 2) giving the Rx, Ry values for each of the given locations
    """
    # Validate up front, then dispatch.
    if type not in (1, 2):
        raise ValueError(f"Invalid GC type. {type} not in {{1,2}}")
    if type == 1:
        return calc_rx_ry_GC1(srf_points, plane_infos, locations)
    return calc_rx_ry_GC2(
        srf_points, plane_infos, locations, hypocentre_origin=hypocentre_origin
    )
def calc_rx_ry_GC1(
    srf_points: np.ndarray, plane_infos: List[Dict], locations: np.ndarray
):
    """
    Calculates Rx and Ry distances using the cross track and along track distance calculations
    Uses the plane nearest to each of the given locations if there are multiple
    :param srf_points: An array with shape (n, 3) giving the lon, lat, depth location of each subfault
    :param plane_infos: A list of srf header dictionaries, as retrieved from qcore.srf.get_headers with idx=True
    :param locations: An array with shape (m, 2) giving the lon, lat locations of each location to get Rx, Ry values for
    :return: An array with shape (m, 2) giving the Rx, Ry values for each of the given locations
    """
    r_x = np.empty(locations.shape[0])
    r_y = np.empty(locations.shape[0])
    # NOTE(review): the appended (0, 0, 0) row appears to pad the final
    # slice boundary below — confirm intent before removing.
    extended_points = np.r_[srf_points, [[0, 0, 0]]]
    # Separate the srf points into the different planes
    pnt_counts = [plane["nstrike"] * plane["ndip"] for plane in plane_infos]
    pnt_counts.insert(0, 0)
    pnt_counts = np.cumsum(pnt_counts)
    pnt_sections = [
        extended_points[pnt_counts[i] : pnt_counts[i + 1]]
        for i in range(len(plane_infos))
    ]
    # Get the top/bottom edges of each plane
    top_edges = [
        section[: header["nstrike"]]
        for section, header in zip(pnt_sections, plane_infos)
    ]
    for iloc in range(locations.shape[0]):
        lon, lat, *_ = locations[iloc]
        if len(plane_infos) > 1:
            # Have to work out which plane the point belongs to
            # Get cumulative number of points in each plane
            # Get the closest point in the fault
            h_dist = h_dist_f(srf_points, lon, lat)
            # Get the index of closest fault plane point
            point_ix = np.argmin(h_dist)
            # Check which planes have points with index greater than the nearest point
            greater_than_threshold = pnt_counts > point_ix
            # Get the first of these planes
            plane_ix = greater_than_threshold.searchsorted(True) - 1
        else:
            # If there is only one plane we don't need to find the nearest plane
            plane_ix = 0
        # End points (lon, lat) of the selected plane's top edge.
        up_strike_top_point = top_edges[plane_ix][0, :2]
        down_strike_top_point = top_edges[plane_ix][-1, :2]
        # If the angle from the first point to the second point is not within 10 degrees of the strike,
        # then we should swap the two points
        if not np.isclose(
            ll_bearing(*up_strike_top_point, *down_strike_top_point),
            plane_infos[plane_ix]["strike"],
            atol=10,
        ):
            up_strike_top_point, down_strike_top_point = (
                down_strike_top_point,
                up_strike_top_point,
            )
        # Rx = cross-track distance from the top-edge line,
        # Ry = along-track distance from the up-strike end.
        r_x[iloc], r_y[iloc] = ll_cross_along_track_dist(
            *up_strike_top_point, *down_strike_top_point, lon, lat
        )
    return r_x, r_y
def calc_rx_ry_GC2(
    srf_points: np.ndarray,
    plane_infos: List[Dict],
    locations: np.ndarray,
    hypocentre_origin=False,
):
    """
    Calculates Rx and Ry distances using the cross track and along track distance calculations
    If there are multiple fault planes the Rx, Ry values are calculated for each fault plane individually, then weighted
    according to plane length and distance to the location
    For one fault plane this is the same as the GC1 function
    :param srf_points: An array with shape (n, 3) giving the lon, lat, depth location of each subfault
    :param plane_infos: A list of srf header dictionaries, as retrieved from qcore.srf.get_headers with idx=True
    :param locations: An array with shape (m, 2) giving the lon, lat locations of each location to get Rx, Ry values for
    :param hypocentre_origin: If True sets the Ry origin/0 point to the fault trace projection of the hypocentre. If
    false the most upstrike subfault of the first fault trace is used
    :return: An array with shape (m, 2) giving the Rx, Ry values for each of the given locations
    """
    r_x = np.empty(locations.shape[0])
    r_y = np.empty(locations.shape[0])
    # Separate the srf points into the different plane traces
    # (only each plane's top edge — the first nstrike points — is kept)
    pnt_counts = [plane["nstrike"] * plane["ndip"] for plane in plane_infos]
    pnt_counts.insert(0, 0)
    pnt_counts = np.cumsum(pnt_counts)
    pnt_sections = [
        srf_points[pnt_counts[i] : pnt_counts[i] + header["nstrike"]]
        for i, header in enumerate(plane_infos)
    ]
    origin_offset = 0
    if hypocentre_origin:
        length = sum([plane["length"] for plane in plane_infos])
        # Our faults only use one hypocentre
        # Will only use the first one found if there are multiple
        # (shyp == -999.9 marks "no hypocentre on this plane")
        for plane in plane_infos:
            if plane["shyp"] != -999.9000:
                origin_offset = -(length / 2 + plane["shyp"])
                break
    for i, loc in enumerate(locations):
        offset = origin_offset
        weights = 0
        r_x_values = 0
        r_y_values = 0
        for plane_points, plane_header in zip(pnt_sections, plane_infos):
            # Per-plane GC1 values, then inverse-square-distance weighting.
            r_x_p, r_y_p = calc_rx_ry_GC1(
                plane_points, [plane_header], np.asarray([loc])
            )
            dists = h_dist_f(plane_points, loc[0], loc[1])
            # Mimimum distance of 0.001km to prevent nans/infs
            # A bit hacky but it works. Only needed when a location is directly on top of a subfault
            dists = np.maximum(dists, 0.001)
            weight = np.sum(np.power(dists, -2))
            weights += weight
            r_x_values += weight * r_x_p
            # Ry accumulates along strike, so shift by the running offset.
            r_y_values += weight * (r_y_p + offset)
            offset += plane_header["length"]
        r_x[i] = r_x_values / weights
        r_y[i] = r_y_values / weights
    return r_x, r_y
def calc_backarc(srf_points: np.ndarray, locations: np.ndarray):
    """
    This is a crude approximation of stations that are on the backarc. Defined by source-site lines that cross the
    Volcanic front line.
    https://user-images.githubusercontent.com/25143301/111406807-ce5bb600-8737-11eb-9c78-b909efe7d9db.png
    https://user-images.githubusercontent.com/25143301/111408728-93a74d00-873a-11eb-9afa-5e8371ee2504.png
    srf_points: np.ndarray
        The fault points from the srf file (qcore, srf.py, read_srf_points),
        format (lon, lat, depth)
    locations: np.ndarray
        The locations for which to calculate the distances,
        format (lon, lat, depth)
    :return: a numpy array returning 0 if the station is on the forearc and 1 if the station is on the backarc
    """
    n_locations = locations.shape[0]
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int maps to the same default platform integer dtype.
    backarc = np.zeros(n_locations, dtype=int)
    for loc_index in range(n_locations):
        # Selection is every 40 SRF points (4 km) - the backarc line is ~200km long.
        # In the case of point sources it will just take the first point
        for srf_point in srf_points[::40]:
            srf_stat_line = mpltPath.Path(
                [
                    (srf_point[0], srf_point[1]),
                    (locations[loc_index][0], locations[loc_index][1]),
                ]
            )
            if VOLCANIC_FRONT_LINE.intersects_path(srf_stat_line):
                backarc[loc_index] = 1
                break
    return backarc
| <filename>IM_calculation/source_site_dist/src_site_dist.py
from typing import List, Dict
import matplotlib.path as mpltPath
import numba
import numpy as np
from qcore.geo import get_distances, ll_cross_along_track_dist, ll_bearing
numba.config.THREADING_LAYER = "omp"
h_dist_f = numba.njit(get_distances)
VOLCANIC_FRONT_COORDS = [(175.508, -39.364), (177.199, -37.73)]
VOLCANIC_FRONT_LINE = mpltPath.Path(VOLCANIC_FRONT_COORDS)
@numba.jit(parallel=True)
def calc_rrup_rjb(srf_points: np.ndarray, locations: np.ndarray):
"""Calculates rrup and rjb distance
Parameters
----------
srf_points: np.ndarray
The fault points from the srf file (qcore, srf.py, read_srf_points),
format (lon, lat, depth)
locations: np.ndarray
The locations for which to calculate the distances,
format (lon, lat, depth)
Returns
-------
rrups : np.ndarray
The rrup distance for the locations, shape/order same as locations
rjb : np.ndarray
The rjb distance for the locations, shape/order same as locations
"""
rrups = np.empty(locations.shape[0])
rjb = np.empty(locations.shape[0])
for loc_ix in numba.prange(locations.shape[0]):
h_dist = h_dist_f(srf_points, locations[loc_ix, 0], locations[loc_ix, 1])
v_dist = srf_points[:, 2] - locations[loc_ix, 2]
d = np.sqrt(h_dist ** 2 + v_dist ** 2)
rrups[loc_ix] = np.min(d)
rjb[loc_ix] = np.min(h_dist)
return rrups, rjb
def calc_rx_ry(
srf_points: np.ndarray,
plane_infos: List[Dict],
locations: np.ndarray,
hypocentre_origin=False,
type=2,
):
"""
A wrapper script allowing external function calls to resolve to the correct location
:param srf_points: An array with shape (n, 3) giving the lon, lat, depth location of each subfault
:param plane_infos: A list of srf header dictionaries, as retrieved from qcore.srf.get_headers with idx=True
:param locations: An array with shape (m, 2) giving the lon, lat locations of each location to get Rx, Ry values for
:param type: Allows switching between the two GC types if desired
:param hypocentre_origin: If True sets the Ry origin/0 point to the fault trace projection of the hypocentre. If
false the most upstrike subfault of the first fault trace is used. Only used for GC2.
:return: An array with shape (m, 2) giving the Rx, Ry values for each of the given locations
"""
if type == 1:
return calc_rx_ry_GC1(srf_points, plane_infos, locations)
elif type == 2:
return calc_rx_ry_GC2(
srf_points, plane_infos, locations, hypocentre_origin=hypocentre_origin
)
else:
raise ValueError(f"Invalid GC type. {type} not in {{1,2}}")
def calc_rx_ry_GC1(
srf_points: np.ndarray, plane_infos: List[Dict], locations: np.ndarray
):
"""
Calculates Rx and Ry distances using the cross track and along track distance calculations
Uses the plane nearest to each of the given locations if there are multiple
:param srf_points: An array with shape (n, 3) giving the lon, lat, depth location of each subfault
:param plane_infos: A list of srf header dictionaries, as retrieved from qcore.srf.get_headers with idx=True
:param locations: An array with shape (m, 2) giving the lon, lat locations of each location to get Rx, Ry values for
:return: An array with shape (m, 2) giving the Rx, Ry values for each of the given locations
"""
r_x = np.empty(locations.shape[0])
r_y = np.empty(locations.shape[0])
extended_points = np.r_[srf_points, [[0, 0, 0]]]
# Separate the srf points into the different planes
pnt_counts = [plane["nstrike"] * plane["ndip"] for plane in plane_infos]
pnt_counts.insert(0, 0)
pnt_counts = np.cumsum(pnt_counts)
pnt_sections = [
extended_points[pnt_counts[i] : pnt_counts[i + 1]]
for i in range(len(plane_infos))
]
# Get the top/bottom edges of each plane
top_edges = [
section[: header["nstrike"]]
for section, header in zip(pnt_sections, plane_infos)
]
for iloc in range(locations.shape[0]):
lon, lat, *_ = locations[iloc]
if len(plane_infos) > 1:
# Have to work out which plane the point belongs to
# Get cumulative number of points in each plane
# Get the closest point in the fault
h_dist = h_dist_f(srf_points, lon, lat)
# Get the index of closest fault plane point
point_ix = np.argmin(h_dist)
# Check which planes have points with index greater than the nearest point
greater_than_threshold = pnt_counts > point_ix
# Get the first of these planes
plane_ix = greater_than_threshold.searchsorted(True) - 1
else:
# If there is only one plane we don't need to find the nearest plane
plane_ix = 0
up_strike_top_point = top_edges[plane_ix][0, :2]
down_strike_top_point = top_edges[plane_ix][-1, :2]
# If the angle from the first point to the second point is not within 10 degrees of the strike,
# then we should swap the two points
if not np.isclose(
ll_bearing(*up_strike_top_point, *down_strike_top_point),
plane_infos[plane_ix]["strike"],
atol=10,
):
up_strike_top_point, down_strike_top_point = (
down_strike_top_point,
up_strike_top_point,
)
r_x[iloc], r_y[iloc] = ll_cross_along_track_dist(
*up_strike_top_point, *down_strike_top_point, lon, lat
)
return r_x, r_y
def calc_rx_ry_GC2(
srf_points: np.ndarray,
plane_infos: List[Dict],
locations: np.ndarray,
hypocentre_origin=False,
):
"""
Calculates Rx and Ry distances using the cross track and along track distance calculations
If there are multiple fault planes the Rx, Ry values are calculated for each fault plane individually, then weighted
according to plane length and distance to the location
For one fault plane this is the same as the GC1 function
:param srf_points: An array with shape (n, 3) giving the lon, lat, depth location of each subfault
:param plane_infos: A list of srf header dictionaries, as retrieved from qcore.srf.get_headers with idx=True
:param locations: An array with shape (m, 2) giving the lon, lat locations of each location to get Rx, Ry values for
:param hypocentre_origin: If True sets the Ry origin/0 point to the fault trace projection of the hypocentre. If
false the most upstrike subfault of the first fault trace is used
:return: An array with shape (m, 2) giving the Rx, Ry values for each of the given locations
"""
r_x = np.empty(locations.shape[0])
r_y = np.empty(locations.shape[0])
# Separate the srf points into the different plane traces
pnt_counts = [plane["nstrike"] * plane["ndip"] for plane in plane_infos]
pnt_counts.insert(0, 0)
pnt_counts = np.cumsum(pnt_counts)
pnt_sections = [
srf_points[pnt_counts[i] : pnt_counts[i] + header["nstrike"]]
for i, header in enumerate(plane_infos)
]
origin_offset = 0
if hypocentre_origin:
length = sum([plane["length"] for plane in plane_infos])
# Our faults only use one hypocentre
# Will only use the first one found if there are multiple
for plane in plane_infos:
if plane["shyp"] != -999.9000:
origin_offset = -(length / 2 + plane["shyp"])
break
for i, loc in enumerate(locations):
offset = origin_offset
weights = 0
r_x_values = 0
r_y_values = 0
for plane_points, plane_header in zip(pnt_sections, plane_infos):
r_x_p, r_y_p = calc_rx_ry_GC1(
plane_points, [plane_header], np.asarray([loc])
)
dists = h_dist_f(plane_points, loc[0], loc[1])
# Mimimum distance of 0.001km to prevent nans/infs
# A bit hacky but it works. Only needed when a location is directly on top of a subfault
dists = np.maximum(dists, 0.001)
weight = np.sum(np.power(dists, -2))
weights += weight
r_x_values += weight * r_x_p
r_y_values += weight * (r_y_p + offset)
offset += plane_header["length"]
r_x[i] = r_x_values / weights
r_y[i] = r_y_values / weights
return r_x, r_y
def calc_backarc(srf_points: np.ndarray, locations: np.ndarray):
"""
This is a crude approximation of stations that are on the backarc. Defined by source-site lines that cross the
Volcanic front line.
https://user-images.githubusercontent.com/25143301/111406807-ce5bb600-8737-11eb-9c78-b909efe7d9db.png
https://user-images.githubusercontent.com/25143301/111408728-93a74d00-873a-11eb-9afa-5e8371ee2504.png
srf_points: np.ndarray
The fault points from the srf file (qcore, srf.py, read_srf_points),
format (lon, lat, depth)
locations: np.ndarray
The locations for which to calculate the distances,
format (lon, lat, depth)
:return: a numpy array returning 0 if the station is on the forearc and 1 if the station is on the backarc
"""
n_locations = locations.shape[0]
backarc = np.zeros(n_locations, dtype=np.int)
for loc_index in range(n_locations):
# Selection is every 40 SRF points (4 km) - the backarc line is ~200km long.
# In the case of point sources it will just take the first point
for srf_point in srf_points[::40]:
srf_stat_line = mpltPath.Path(
[
(srf_point[0], srf_point[1]),
(locations[loc_index][0], locations[loc_index][1]),
]
)
if VOLCANIC_FRONT_LINE.intersects_path(srf_stat_line):
backarc[loc_index] = 1
break
return backarc
| en | 0.817164 | Calculates rrup and rjb distance Parameters ---------- srf_points: np.ndarray The fault points from the srf file (qcore, srf.py, read_srf_points), format (lon, lat, depth) locations: np.ndarray The locations for which to calculate the distances, format (lon, lat, depth) Returns ------- rrups : np.ndarray The rrup distance for the locations, shape/order same as locations rjb : np.ndarray The rjb distance for the locations, shape/order same as locations A wrapper script allowing external function calls to resolve to the correct location :param srf_points: An array with shape (n, 3) giving the lon, lat, depth location of each subfault :param plane_infos: A list of srf header dictionaries, as retrieved from qcore.srf.get_headers with idx=True :param locations: An array with shape (m, 2) giving the lon, lat locations of each location to get Rx, Ry values for :param type: Allows switching between the two GC types if desired :param hypocentre_origin: If True sets the Ry origin/0 point to the fault trace projection of the hypocentre. If false the most upstrike subfault of the first fault trace is used. Only used for GC2. 
:return: An array with shape (m, 2) giving the Rx, Ry values for each of the given locations Calculates Rx and Ry distances using the cross track and along track distance calculations Uses the plane nearest to each of the given locations if there are multiple :param srf_points: An array with shape (n, 3) giving the lon, lat, depth location of each subfault :param plane_infos: A list of srf header dictionaries, as retrieved from qcore.srf.get_headers with idx=True :param locations: An array with shape (m, 2) giving the lon, lat locations of each location to get Rx, Ry values for :return: An array with shape (m, 2) giving the Rx, Ry values for each of the given locations # Separate the srf points into the different planes # Get the top/bottom edges of each plane # Have to work out which plane the point belongs to # Get cumulative number of points in each plane # Get the closest point in the fault # Get the index of closest fault plane point # Check which planes have points with index greater than the nearest point # Get the first of these planes # If there is only one plane we don't need to find the nearest plane # If the angle from the first point to the second point is not within 10 degrees of the strike, # then we should swap the two points Calculates Rx and Ry distances using the cross track and along track distance calculations If there are multiple fault planes the Rx, Ry values are calculated for each fault plane individually, then weighted according to plane length and distance to the location For one fault plane this is the same as the GC1 function :param srf_points: An array with shape (n, 3) giving the lon, lat, depth location of each subfault :param plane_infos: A list of srf header dictionaries, as retrieved from qcore.srf.get_headers with idx=True :param locations: An array with shape (m, 2) giving the lon, lat locations of each location to get Rx, Ry values for :param hypocentre_origin: If True sets the Ry origin/0 point to the fault trace projection 
of the hypocentre. If false the most upstrike subfault of the first fault trace is used :return: An array with shape (m, 2) giving the Rx, Ry values for each of the given locations # Separate the srf points into the different plane traces # Our faults only use one hypocentre # Will only use the first one found if there are multiple # Mimimum distance of 0.001km to prevent nans/infs # A bit hacky but it works. Only needed when a location is directly on top of a subfault This is a crude approximation of stations that are on the backarc. Defined by source-site lines that cross the Volcanic front line. https://user-images.githubusercontent.com/25143301/111406807-ce5bb600-8737-11eb-9c78-b909efe7d9db.png https://user-images.githubusercontent.com/25143301/111408728-93a74d00-873a-11eb-9afa-5e8371ee2504.png srf_points: np.ndarray The fault points from the srf file (qcore, srf.py, read_srf_points), format (lon, lat, depth) locations: np.ndarray The locations for which to calculate the distances, format (lon, lat, depth) :return: a numpy array returning 0 if the station is on the forearc and 1 if the station is on the backarc # Selection is every 40 SRF points (4 km) - the backarc line is ~200km long. # In the case of point sources it will just take the first point | 2.391095 | 2 |
gallery/migrations/0003_auto_20180711_2315.py | mzazakeith/unsplash-clone | 1 | 6616631 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-07-11 20:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the Location model's ``location_name`` field to ``location``."""

    dependencies = [
        ('gallery', '0002_auto_20180711_2247'),
    ]

    operations = [
        # Pure column rename; no data is transformed.
        migrations.RenameField(
            model_name='location',
            old_name='location_name',
            new_name='location',
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-07-11 20:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gallery', '0002_auto_20180711_2247'),
]
operations = [
migrations.RenameField(
model_name='location',
old_name='location_name',
new_name='location',
),
]
| en | 0.69588 | # -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-07-11 20:15 | 1.639264 | 2 |
grasp/utils/graph_utils.py | samuelstanton/GraSP | 0 | 6616632 | <gh_stars>0
import os
import math
import torch
from graphviz import Digraph
class GraphNode(object):
    """Node that aggregates incoming edge features by element-wise sum."""

    def __init__(self, in_degree):
        super().__init__()
        # Number of incoming edges; informational, not used by __call__.
        self.in_degree = in_degree

    def __call__(self, edge_features):
        stacked = torch.stack(edge_features)
        return stacked.sum(dim=0)
class GraphEdge(torch.nn.Module):
    """Weighted mixture of candidate ops on a single graph edge.

    Each op's output is scaled by ``exp(weight_i)`` and the scaled
    outputs are summed to form the edge feature.
    """

    def __init__(self, op_dict, op_weights=None):
        super().__init__()
        self.op_dict = torch.nn.ModuleDict(op_dict)
        if op_weights is None:
            # Default to uniform mixing weights across all ops.
            op_weights = torch.ones(self.num_ops) / self.num_ops
        else:
            assert op_weights.size(0) == len(op_dict)
        self.register_parameter('weight', torch.nn.Parameter(op_weights))

    def forward(self, node_features):
        weighted_outputs = [
            scale * op(node_features)
            for scale, op in zip(self.weight.exp(), self.op_modules)
        ]
        return torch.stack(weighted_outputs).sum(0)

    def remove_ops(self, idxs):
        # Snapshot the key order once so indices stay valid during deletion.
        names = self.op_names
        for idx in idxs:
            del self.op_dict[names[idx]]

    @property
    def num_ops(self):
        return len(self.op_dict)

    @property
    def op_names(self):
        return list(self.op_dict.keys())

    @property
    def op_modules(self):
        return list(self.op_dict.values())
class GraphLayer(torch.nn.Module):
    """
    Module for representing an arbitrary computation graph. Ops can be masked and removed
    using the `sparsify` method. The graph structure is defined by `adj_dict`, which has items of
    the form `{tail_node_idx: (head_node_idx, edge_idx)}`. The total number of tuples should equal the
    number of edges. `adj_dict` can be thought of as a compact representation of the adjacency matrix.
    The keys of `adj_dict` should go from `1` to `N-1`, where `N` is the number of nodes in the graph. The
    first node is defined implicitly.
    """

    def __init__(self, edges, adj_dict):
        """
        edges: list of op modules (GraphEdge instances),
        adj_dict: dict defining edge adjacency
        """
        super().__init__()
        self.edges = torch.nn.ModuleList(edges)
        self.adj_dict = adj_dict
        self.nodes = self._create_nodes()
        self._init_op_weights()

    def _create_nodes(self):
        """Build one aggregation node per graph node (node 0 is the implicit input node)."""
        nodes = [GraphNode(in_degree=1)]
        for tail_node_id, head_edge_tuples in self.adj_dict.items():
            # A node's in-degree counts every candidate op on every incoming edge.
            in_degree = sum([self.edges[edge_id].num_ops for _, edge_id in head_edge_tuples])
            nodes.append(GraphNode(in_degree))
        return nodes

    def _init_op_weights(self):
        """Initialize every edge's op weights uniformly at random (in log space)."""
        # `stdv` only depends on the total op count, so hoist it out of the loops.
        stdv = 1. / math.sqrt(self.num_ops)
        for tail_node_idx, head_edge_tuple in self.adj_dict.items():
            for _, edge_idx in head_edge_tuple:
                edge = self.edges[edge_idx]
                # edge.weight.data.uniform_(-stdv, stdv)  # use this for linear op weights
                # Weights are exponentiated in GraphEdge.forward, hence log-space init.
                edge.weight.data.uniform_(math.log(stdv / 2), math.log(stdv))

    def forward(self, inputs):
        """Propagate `inputs` through the graph; returns the last node's features."""
        node_features = [self.nodes[0]([inputs])]
        for tail_node_idx, head_edge_tuples in self.adj_dict.items():
            edge_features = []
            for head_node_idx, edge_idx in head_edge_tuples:
                edge_features.append(self.edges[edge_idx](node_features[head_node_idx]))
            if len(edge_features) > 0:
                node_features.append(self.nodes[tail_node_idx](edge_features))
            else:
                # Disconnected node: a zero tensor on the same device/dtype as
                # `inputs` (the multiply keeps it attached to the autograd graph).
                node_features.append(torch.zeros_like(inputs) * inputs)
        return node_features[-1]

    def sparsify(self, op_masks):
        """Drop ops where the mask is 0; edges left fully masked are removed.

        op_masks: one mask tensor per edge, aligned with the edge's op order.
        NOTE(review): `weight[mask]` below assumes a *boolean* mask — confirm
        callers never pass integer index tensors.
        """
        assert len(op_masks) == self.num_edges
        for edge_idx, mask in enumerate(op_masks):
            if torch.all(mask == 0):
                continue  # fully-masked edges are removed wholesale below
            if torch.any(mask == 0):
                drop_idxs = [idx.item() for idx in torch.nonzero(mask == 0, as_tuple=False)]
                self.edges[edge_idx].remove_ops(drop_idxs)
                op_dict = dict(self.edges[edge_idx].op_dict.items())
                op_weights = self.edges[edge_idx].weight[mask]
                self.edges[edge_idx] = GraphEdge(op_dict, op_weights)
        drop_edges = [i for i in range(self.num_edges) if torch.all(op_masks[i] == 0)]
        self.remove_edges(drop_edges)

    def remove_edges(self, idxs):
        """Delete the edges at positions `idxs` and renumber the survivors."""
        # update edge list
        self.edges = torch.nn.ModuleList(drop_list_elements(self.edges, idxs))
        # update adjacency dict: renumber surviving edges consecutively
        edge_count = 0
        for tail_node_idx, head_edge_tuples in self.adj_dict.items():
            drop_tuples = []
            for tuple_idx, (head_node_idx, edge_idx) in enumerate(head_edge_tuples):
                if edge_idx in idxs:
                    drop_tuples.append(tuple_idx)
                else:
                    self.adj_dict[tail_node_idx][tuple_idx] = (head_node_idx, edge_count)
                    edge_count += 1
            self.adj_dict[tail_node_idx] = drop_list_elements(self.adj_dict[tail_node_idx], drop_tuples)
        # reconstruct aggregation nodes (in-degrees may have changed)
        self.nodes = self._create_nodes()

    def draw_graph(self, log_dir, graph_name, view=False):
        """Render the graph with graphviz and save its source next to it."""
        graph = Digraph()
        for node_idx in range(self.num_nodes):
            graph.node(str(node_idx))
        for tail_node_idx, head_edge_tuples in self.adj_dict.items():
            for head_node_idx, edge_idx in head_edge_tuples:
                for op_name in self.edges[edge_idx].op_dict.keys():
                    graph.edge(str(head_node_idx), str(tail_node_idx), label=op_name)
        # NOTE(review): graphviz `render` appends the output-format suffix, so the
        # ".pdf" in this path presumably yields "<name>.pdf.<fmt>" — confirm.
        graph.render(os.path.join(log_dir, f"{graph_name}.pdf"), view=view)
        graph.save(graph_name, log_dir)

    @property
    def edge_weights(self):
        """Stacked edge weights; `.grad` is attached when every edge has a grad."""
        edge_weights = torch.stack([edge.weight for edge in self.edges])
        grads = [edge.weight.grad for edge in self.edges]
        if all(g is not None for g in grads):
            # BUG FIX: the original passed a *generator* to torch.stack (which
            # requires a sequence, so it raised TypeError) and hid the failure
            # behind a bare `except: pass` — `.grad` was never populated.
            edge_weights.grad = torch.stack(grads)
        return edge_weights

    @property
    def num_nodes(self):
        return len(self.nodes)

    @property
    def num_edges(self):
        return len(self.edges)

    @property
    def num_ops(self):
        """Total candidate-op count across all edges."""
        num_ops = 0
        for edge in self.edges:
            num_ops += edge.num_ops
        return num_ops
def drop_list_elements(old_list, idxs):
    """Return a copy of *old_list* without the items at positions *idxs*."""
    # Convert to a set so membership tests are O(1) instead of O(len(idxs)).
    drop = set(idxs)
    return [item for i, item in enumerate(old_list) if i not in drop]
import os
import math
import torch
from graphviz import Digraph
class GraphNode(object):
    """Aggregation point of the computation graph: sums its incoming edge features."""

    def __init__(self, in_degree):
        super().__init__()
        # Number of incoming (op-level) feature streams this node aggregates.
        self.in_degree = in_degree

    def __call__(self, edge_features):
        stacked = torch.stack(edge_features, dim=0)
        return stacked.sum(dim=0)
class GraphEdge(torch.nn.Module):
    """A graph edge holding a weighted mixture of candidate operations.

    The learnable `weight` parameter is kept in log space; `forward`
    exponentiates it before mixing the op outputs.
    """

    def __init__(self, op_dict, op_weights=None):
        super().__init__()
        self.op_dict = torch.nn.ModuleDict(op_dict)
        if op_weights is None:
            # Default: uniform weights over the candidate ops.
            op_weights = torch.ones(self.num_ops) / self.num_ops
        else:
            assert op_weights.size(0) == len(op_dict)
        self.register_parameter('weight', torch.nn.Parameter(op_weights))

    def forward(self, node_features):
        mixed = [
            weight * op(node_features)
            for weight, op in zip(self.weight.exp(), self.op_modules)
        ]
        return torch.stack(mixed).sum(0)

    def remove_ops(self, idxs):
        # Resolve positional indices against the current key order, then drop.
        names = list(self.op_dict.keys())
        for idx in idxs:
            del self.op_dict[names[idx]]

    @property
    def num_ops(self):
        return len(self.op_dict)

    @property
    def op_names(self):
        return list(self.op_dict.keys())

    @property
    def op_modules(self):
        return list(self.op_dict.values())
class GraphLayer(torch.nn.Module):
    """
    Module for representing an arbitrary computation graph. Ops can be masked and removed
    using the `sparsify` method. The graph structure is defined by `adj_dict`, which has items of
    the form `{tail_node_idx: (head_node_idx, edge_idx)}`. The total number of tuples should equal the
    number of edges. `adj_dict` can be thought of as a compact representation of the adjacency matrix.
    The keys of `adj_dict` should go from `1` to `N-1`, where `N` is the number of nodes in the graph. The
    first node is defined implicitly.
    """

    def __init__(self, edges, adj_dict):
        """
        edges: list of op modules (GraphEdge instances),
        adj_dict: dict defining edge adjacency
        """
        super().__init__()
        self.edges = torch.nn.ModuleList(edges)
        self.adj_dict = adj_dict
        self.nodes = self._create_nodes()
        self._init_op_weights()

    def _create_nodes(self):
        """Build one aggregation node per graph node (node 0 is the implicit input node)."""
        nodes = [GraphNode(in_degree=1)]
        for tail_node_id, head_edge_tuples in self.adj_dict.items():
            # A node's in-degree counts every candidate op on every incoming edge.
            in_degree = sum([self.edges[edge_id].num_ops for _, edge_id in head_edge_tuples])
            nodes.append(GraphNode(in_degree))
        return nodes

    def _init_op_weights(self):
        """Initialize every edge's op weights uniformly at random (in log space)."""
        # `stdv` only depends on the total op count, so hoist it out of the loops.
        stdv = 1. / math.sqrt(self.num_ops)
        for tail_node_idx, head_edge_tuple in self.adj_dict.items():
            for _, edge_idx in head_edge_tuple:
                edge = self.edges[edge_idx]
                # edge.weight.data.uniform_(-stdv, stdv)  # use this for linear op weights
                # Weights are exponentiated in GraphEdge.forward, hence log-space init.
                edge.weight.data.uniform_(math.log(stdv / 2), math.log(stdv))

    def forward(self, inputs):
        """Propagate `inputs` through the graph; returns the last node's features."""
        node_features = [self.nodes[0]([inputs])]
        for tail_node_idx, head_edge_tuples in self.adj_dict.items():
            edge_features = []
            for head_node_idx, edge_idx in head_edge_tuples:
                edge_features.append(self.edges[edge_idx](node_features[head_node_idx]))
            if len(edge_features) > 0:
                node_features.append(self.nodes[tail_node_idx](edge_features))
            else:
                # Disconnected node: a zero tensor on the same device/dtype as
                # `inputs` (the multiply keeps it attached to the autograd graph).
                node_features.append(torch.zeros_like(inputs) * inputs)
        return node_features[-1]

    def sparsify(self, op_masks):
        """Drop ops where the mask is 0; edges left fully masked are removed.

        op_masks: one mask tensor per edge, aligned with the edge's op order.
        NOTE(review): `weight[mask]` below assumes a *boolean* mask — confirm
        callers never pass integer index tensors.
        """
        assert len(op_masks) == self.num_edges
        for edge_idx, mask in enumerate(op_masks):
            if torch.all(mask == 0):
                continue  # fully-masked edges are removed wholesale below
            if torch.any(mask == 0):
                drop_idxs = [idx.item() for idx in torch.nonzero(mask == 0, as_tuple=False)]
                self.edges[edge_idx].remove_ops(drop_idxs)
                op_dict = dict(self.edges[edge_idx].op_dict.items())
                op_weights = self.edges[edge_idx].weight[mask]
                self.edges[edge_idx] = GraphEdge(op_dict, op_weights)
        drop_edges = [i for i in range(self.num_edges) if torch.all(op_masks[i] == 0)]
        self.remove_edges(drop_edges)

    def remove_edges(self, idxs):
        """Delete the edges at positions `idxs` and renumber the survivors."""
        # update edge list
        self.edges = torch.nn.ModuleList(drop_list_elements(self.edges, idxs))
        # update adjacency dict: renumber surviving edges consecutively
        edge_count = 0
        for tail_node_idx, head_edge_tuples in self.adj_dict.items():
            drop_tuples = []
            for tuple_idx, (head_node_idx, edge_idx) in enumerate(head_edge_tuples):
                if edge_idx in idxs:
                    drop_tuples.append(tuple_idx)
                else:
                    self.adj_dict[tail_node_idx][tuple_idx] = (head_node_idx, edge_count)
                    edge_count += 1
            self.adj_dict[tail_node_idx] = drop_list_elements(self.adj_dict[tail_node_idx], drop_tuples)
        # reconstruct aggregation nodes (in-degrees may have changed)
        self.nodes = self._create_nodes()

    def draw_graph(self, log_dir, graph_name, view=False):
        """Render the graph with graphviz and save its source next to it."""
        graph = Digraph()
        for node_idx in range(self.num_nodes):
            graph.node(str(node_idx))
        for tail_node_idx, head_edge_tuples in self.adj_dict.items():
            for head_node_idx, edge_idx in head_edge_tuples:
                for op_name in self.edges[edge_idx].op_dict.keys():
                    graph.edge(str(head_node_idx), str(tail_node_idx), label=op_name)
        # NOTE(review): graphviz `render` appends the output-format suffix, so the
        # ".pdf" in this path presumably yields "<name>.pdf.<fmt>" — confirm.
        graph.render(os.path.join(log_dir, f"{graph_name}.pdf"), view=view)
        graph.save(graph_name, log_dir)

    @property
    def edge_weights(self):
        """Stacked edge weights; `.grad` is attached when every edge has a grad."""
        edge_weights = torch.stack([edge.weight for edge in self.edges])
        grads = [edge.weight.grad for edge in self.edges]
        if all(g is not None for g in grads):
            # BUG FIX: the original passed a *generator* to torch.stack (which
            # requires a sequence, so it raised TypeError) and hid the failure
            # behind a bare `except: pass` — `.grad` was never populated.
            edge_weights.grad = torch.stack(grads)
        return edge_weights

    @property
    def num_nodes(self):
        return len(self.nodes)

    @property
    def num_edges(self):
        return len(self.edges)

    @property
    def num_ops(self):
        """Total candidate-op count across all edges."""
        num_ops = 0
        for edge in self.edges:
            num_ops += edge.num_ops
        return num_ops
def drop_list_elements(old_list, idxs):
    """Return a copy of *old_list* without the items at positions *idxs*."""
    # Convert to a set so membership tests are O(1) instead of O(len(idxs)).
    drop = set(idxs)
    return [item for i, item in enumerate(old_list) if i not in drop]
scripts/results-analyse-time.py | rgoulter/system-testing | 0 | 6616633 | <reponame>rgoulter/system-testing<filename>scripts/results-analyse-time.py
#! /usr/bin/env python2
# I want to see how much could be tested, using a moderately-long amount of time.
# i.e. How much of an effect is due to TIMEOUTs, and other long-running tests.
from os import listdir, rename
from os.path import isfile, isdir, join
import sys
import json
import math
TIMEOUT = 300*1000  # 300 s in milliseconds; runs longer than this are classed "TIMEOUT"
# Example of Results JSON (for valid test):
# {
# "command" : "hip",
# "results" : [ ... ],
# "executionTime" : "515",
# "filename" : "examples/working/hip/threads/no-deadlock-nonlexical.ss",
# "arguments" : "--en-para --en-thrd-resource -tp parahip --en-lsmu-infer"
# }
#
# has "remarks" key if invalid.
def load_json_file(json_path):
    """Parse one result JSON file into an (execution_time_ms, kind) tuple.

    kind is "TIMEOUT" if the run exceeded TIMEOUT, "INVALID" if the result
    carries a "remarks" key, and "VALID" otherwise.
    """
    # `with` closes the handle even if JSON parsing raises (the original
    # leaked the file object on a parse error).
    with open(json_path) as f:
        data = json.load(f)
    # "purify" the data to the form we care about.
    time = int(data["executionTime"])
    if time > TIMEOUT:
        kind = "TIMEOUT"
    elif "remarks" in data:
        kind = "INVALID"
    else:
        kind = "VALID"
    # TODO: Could also consider whether the test "PASSED", "FAILED"
    return (time, kind)
# (time_ms, kind) tuple accessors.  Plain defs replace the original
# Python-2-only tuple-unpacking lambdas, so these helpers also run on
# Python 3; behavior is unchanged.
def timeof(result):
    """Return the execution time (ms) of a (time, kind) result tuple."""
    return result[0]

def kindof(result):
    """Return the kind ("VALID"/"INVALID"/"TIMEOUT") of a result tuple."""
    return result[1]

def filter_for_kind(kind):
    """Return a function selecting the results of the given kind, as a list."""
    # Return a list (as Python 2's filter() did) so len() keeps working.
    return lambda results: [r for r in results if r[1] == kind]

filter_for_timeout = filter_for_kind("TIMEOUT")
filter_for_invalid = filter_for_kind("INVALID")
filter_for_valid = filter_for_kind("VALID")

def filter_for_time(time, xs):
    """Return the results whose execution time is <= `time` ms, as a list."""
    return [r for r in xs if r[0] <= time]
def load_files_in_dir(dir_path):
    """Load every result file in `dir_path` into (time, kind) tuples."""
    # Strictly assumes everything in dir_path is a json file.
    file_names = listdir(dir_path)
    return [load_json_file(join(dir_path, name)) for name in file_names]
# def load_for_results_dir(results_path):
# # Strictly assumes everything in results_path is
# # a dir, (with a commit hash)..
# return [rename_files_in_dir(join(results_path, cmt)) for cmt in listdir(results_path)]
# So what to do with list of the results?
# For percentailes 10...90,
# what the longest-running test is (i.e. value of the percentile),
# summation of the percentiles i.e. how long to run to that percentile
# percentage of all results captured by percentile;
# percentage of passing, failing, invalid
def chart_results(ls_of_res):
    """Print percentile and tentative-timeout statistics for a result list.

    ls_of_res: list of (execution_time_ms, kind) tuples as produced by
    load_json_file.
    """
    sorted_data = sorted(ls_of_res)
    sorted_valid_data = filter_for_valid(sorted_data)
    sorted_invalid_data = filter_for_invalid(sorted_data)
    def percentile_idx(p):
        # 0-based index of the p-th percentile element in sorted_data.
        return int(math.ceil(float(len(sorted_data) * p) / 100)) - 1
    def print_stats(xs, use_timeout = -1):
        # xs: prefix of sorted_data "covered" by the candidate cut-off;
        # use_timeout: assumed timeout (ms) charged to each excluded test.
        # Longest Running
        longest = timeof(xs[-1])
        print "Longest Time: %8d ms" % longest
        # Sum up till that point
        total = sum([timeof(d) for d in xs])
        print "Total Time (excl T/OUT): %8d ms" % total
        tout = max(use_timeout, longest) # use use_timeout, if given, otherwise use `longest`
        touts = total + (tout * (len(sorted_data) - len(xs)))
        print "Total Time (incl T/OUT): %8d ms" % touts
        # What % of all tests are "covered" by `xs`?
        print "Percent of All Tests: %3d%%" % (100 * float(len(xs)) / len(sorted_data))
        # How many aren't?
        print "Complement size (T/OUT): %d" % (len(sorted_data) - len(xs))
        print "Percent of VALID tests: %3d%%" % (100 * float(len(filter_for_valid(xs))) / len(sorted_valid_data))
        print "Percent of INVALID tests: %3d%%" % (100 * float(len(filter_for_invalid(xs))) / len(sorted_invalid_data))
    print "Charting Results"
    print "(%d results)" % len(sorted_data)
    print
    # percentiles 10..90
    # n.b. ostensible < 80-percentile not great, so
    for percent in range(80, 100, 5):
        print "For %d Percentile:" % percent
        pidx = percentile_idx(percent)
        xs = sorted_data[:pidx+1]
        print_stats(xs)
        print
    # Want to investigate various proposed T/OUT times.
    # The largest not-TIMEOUT-time is ~138s, which would surely cost too much.
    # for time in range(1800, 3200, 200):
    for time in range(1500, 6000, 500):
        print "For tentative timeout %d ms:" % time
        xs = filter_for_time(time, sorted_data)
        print_stats(xs, time)
        print
    # For reference, if we do *all* tests:
    print "For All Tests:"
    print_stats(sorted_data)
if __name__ == "__main__":
    # Usage: results-analyse-time.py <results-dir>
    dirname = sys.argv[1]
    res = load_files_in_dir(dirname)
    chart_results(res)
    print
    # Find longest time, before timeout.
    # i.e. if time longer than this, 100% chance it will time out.
    # (if this is >> 2s, then will be expensive to run all tests..)
    print "Count > 300s: %d" % len(filter_for_timeout(res))
#! /usr/bin/env python2
# I want to see how much could be tested, using a moderately-long amount of time.
# i.e. How much of an effect is due to TIMEOUTs, and other long-running tests.
from os import listdir, rename
from os.path import isfile, isdir, join
import sys
import json
import math
TIMEOUT = 300*1000  # 300 s in milliseconds; runs longer than this are classed "TIMEOUT"
# Example of Results JSON (for valid test):
# {
# "command" : "hip",
# "results" : [ ... ],
# "executionTime" : "515",
# "filename" : "examples/working/hip/threads/no-deadlock-nonlexical.ss",
# "arguments" : "--en-para --en-thrd-resource -tp parahip --en-lsmu-infer"
# }
#
# has "remarks" key if invalid.
def load_json_file(json_path):
    """Parse one result JSON file into an (execution_time_ms, kind) tuple.

    kind is "TIMEOUT" if the run exceeded TIMEOUT, "INVALID" if the result
    carries a "remarks" key, and "VALID" otherwise.
    """
    # `with` closes the handle even if JSON parsing raises (the original
    # leaked the file object on a parse error).
    with open(json_path) as f:
        data = json.load(f)
    # "purify" the data to the form we care about.
    time = int(data["executionTime"])
    if time > TIMEOUT:
        kind = "TIMEOUT"
    elif "remarks" in data:
        kind = "INVALID"
    else:
        kind = "VALID"
    # TODO: Could also consider whether the test "PASSED", "FAILED"
    return (time, kind)
# (time_ms, kind) tuple accessors.  Plain defs replace the original
# Python-2-only tuple-unpacking lambdas, so these helpers also run on
# Python 3; behavior is unchanged.
def timeof(result):
    """Return the execution time (ms) of a (time, kind) result tuple."""
    return result[0]

def kindof(result):
    """Return the kind ("VALID"/"INVALID"/"TIMEOUT") of a result tuple."""
    return result[1]

def filter_for_kind(kind):
    """Return a function selecting the results of the given kind, as a list."""
    # Return a list (as Python 2's filter() did) so len() keeps working.
    return lambda results: [r for r in results if r[1] == kind]

filter_for_timeout = filter_for_kind("TIMEOUT")
filter_for_invalid = filter_for_kind("INVALID")
filter_for_valid = filter_for_kind("VALID")

def filter_for_time(time, xs):
    """Return the results whose execution time is <= `time` ms, as a list."""
    return [r for r in xs if r[0] <= time]
def load_files_in_dir(dir_path):
    """Load every result file in `dir_path` into (time, kind) tuples."""
    # Strictly assumes everything in dir_path is a json file.
    file_names = listdir(dir_path)
    return [load_json_file(join(dir_path, name)) for name in file_names]
# def load_for_results_dir(results_path):
# # Strictly assumes everything in results_path is
# # a dir, (with a commit hash)..
# return [rename_files_in_dir(join(results_path, cmt)) for cmt in listdir(results_path)]
# So what to do with list of the results?
# For percentailes 10...90,
# what the longest-running test is (i.e. value of the percentile),
# summation of the percentiles i.e. how long to run to that percentile
# percentage of all results captured by percentile;
# percentage of passing, failing, invalid
def chart_results(ls_of_res):
    """Print percentile and tentative-timeout statistics for a result list.

    ls_of_res: list of (execution_time_ms, kind) tuples as produced by
    load_json_file.
    """
    sorted_data = sorted(ls_of_res)
    sorted_valid_data = filter_for_valid(sorted_data)
    sorted_invalid_data = filter_for_invalid(sorted_data)
    def percentile_idx(p):
        # 0-based index of the p-th percentile element in sorted_data.
        return int(math.ceil(float(len(sorted_data) * p) / 100)) - 1
    def print_stats(xs, use_timeout = -1):
        # xs: prefix of sorted_data "covered" by the candidate cut-off;
        # use_timeout: assumed timeout (ms) charged to each excluded test.
        # Longest Running
        longest = timeof(xs[-1])
        print "Longest Time: %8d ms" % longest
        # Sum up till that point
        total = sum([timeof(d) for d in xs])
        print "Total Time (excl T/OUT): %8d ms" % total
        tout = max(use_timeout, longest) # use use_timeout, if given, otherwise use `longest`
        touts = total + (tout * (len(sorted_data) - len(xs)))
        print "Total Time (incl T/OUT): %8d ms" % touts
        # What % of all tests are "covered" by `xs`?
        print "Percent of All Tests: %3d%%" % (100 * float(len(xs)) / len(sorted_data))
        # How many aren't?
        print "Complement size (T/OUT): %d" % (len(sorted_data) - len(xs))
        print "Percent of VALID tests: %3d%%" % (100 * float(len(filter_for_valid(xs))) / len(sorted_valid_data))
        print "Percent of INVALID tests: %3d%%" % (100 * float(len(filter_for_invalid(xs))) / len(sorted_invalid_data))
    print "Charting Results"
    print "(%d results)" % len(sorted_data)
    print
    # percentiles 10..90
    # n.b. ostensible < 80-percentile not great, so
    for percent in range(80, 100, 5):
        print "For %d Percentile:" % percent
        pidx = percentile_idx(percent)
        xs = sorted_data[:pidx+1]
        print_stats(xs)
        print
    # Want to investigate various proposed T/OUT times.
    # The largest not-TIMEOUT-time is ~138s, which would surely cost too much.
    # for time in range(1800, 3200, 200):
    for time in range(1500, 6000, 500):
        print "For tentative timeout %d ms:" % time
        xs = filter_for_time(time, sorted_data)
        print_stats(xs, time)
        print
    # For reference, if we do *all* tests:
    print "For All Tests:"
    print_stats(sorted_data)
if __name__ == "__main__":
    # Usage: results-analyse-time.py <results-dir>
    dirname = sys.argv[1]
    res = load_files_in_dir(dirname)
    chart_results(res)
    print
    # Find longest time, before timeout.
    # i.e. if time longer than this, 100% chance it will time out.
    # (if this is >> 2s, then will be expensive to run all tests..)
    print "Count > 300s: %d" % len(filter_for_timeout(res))
communication.py | Ppasha9/computer-network-coursework | 0 | 6616634 | <filename>communication.py
from multiprocessing import Pipe
class Channel:
    """
    One endpoint of a two-way channel between agents.

    Thin wrapper that forwards send/poll/recv/close to the underlying
    multiprocessing pipe connection.
    """

    def __init__(self, pipe):
        self.pipe = pipe

    def send(self, msg):
        self.pipe.send(msg)

    def poll(self, timeout=None):
        return self.pipe.poll(timeout)

    def recv(self):
        return self.pipe.recv()

    def close(self):
        return self.pipe.close()
class BaseConnection:
    """
    Connection between two agents: a pipe wrapped into two channels,
    one for the parent end and one for the child end.
    """

    def __init__(self):
        parent_end, child_end = Pipe()
        self.parent = parent_end
        self.child = child_end
        self.parent_channel = Channel(parent_end)
        self.child_channel = Channel(child_end)

    def sender(self):
        return self.parent_channel

    def receiver(self):
        return self.child_channel
class Connection(BaseConnection):
    """
    Connection that remembers the agent indices on each side: the sending
    channel records whom it talks to, the receiving channel whom it hears from.
    """

    def __init__(self, from_idx, to_idx):
        super().__init__()
        sender_channel = self.parent_channel
        receiver_channel = self.child_channel
        sender_channel.type = 'sender'
        sender_channel.to_idx = to_idx
        receiver_channel.type = 'receiver'
        receiver_channel.from_idx = from_idx
class Message:
    """Base envelope exchanged between agents.

    Attributes: sender_idx / recepient_idx are the agent indices (the
    spelling of `recepient_idx` is kept — it is part of the public
    attribute interface); `data` is an optional payload.
    """

    def __init__(self, sender_idx=None, recepient_idx=None, data=None):
        self.sender_idx = sender_idx
        self.recepient_idx = recepient_idx
        self.data = data

    def __repr__(self):
        # BUG FIX: the original interpolated `type(self)` (which renders as
        # "<class '…'>") and left a stray '#' in the text; use the bare class
        # name so subclass reprs read cleanly too.
        return f"<{type(self).__name__} ({self.sender_idx})->({self.recepient_idx})>"
class HelloMessage(Message):
    """Message subtype distinguished only by its class; carries no extra fields."""
    pass
class StartCorrectionMessage(Message):
    """Marker message — presumably signals the start of a correction phase; confirm with senders."""
    pass
class StopCorrectionMessage(Message):
    """Marker message — presumably signals the end of a correction phase; confirm with senders."""
    pass
class AskEnergyMessage(Message):
    """Marker message — presumably a request for an energy reading; confirm with senders."""
    pass
class RespondEnergyMessage(Message):
    """Marker message — presumably the reply to AskEnergyMessage; confirm with senders."""
    pass
| <filename>communication.py
from multiprocessing import Pipe
class Channel:
    """
    One endpoint of a two-way channel between agents.

    Thin wrapper that forwards send/poll/recv/close to the underlying
    multiprocessing pipe connection.
    """

    def __init__(self, pipe):
        self.pipe = pipe

    def send(self, msg):
        self.pipe.send(msg)

    def poll(self, timeout=None):
        return self.pipe.poll(timeout)

    def recv(self):
        return self.pipe.recv()

    def close(self):
        return self.pipe.close()
class BaseConnection:
    """
    Connection between two agents: a pipe wrapped into two channels,
    one for the parent end and one for the child end.
    """

    def __init__(self):
        parent_end, child_end = Pipe()
        self.parent = parent_end
        self.child = child_end
        self.parent_channel = Channel(parent_end)
        self.child_channel = Channel(child_end)

    def sender(self):
        return self.parent_channel

    def receiver(self):
        return self.child_channel
class Connection(BaseConnection):
    """
    Connection that remembers the agent indices on each side: the sending
    channel records whom it talks to, the receiving channel whom it hears from.
    """

    def __init__(self, from_idx, to_idx):
        super().__init__()
        sender_channel = self.parent_channel
        receiver_channel = self.child_channel
        sender_channel.type = 'sender'
        sender_channel.to_idx = to_idx
        receiver_channel.type = 'receiver'
        receiver_channel.from_idx = from_idx
class Message:
    """Base envelope exchanged between agents.

    Attributes: sender_idx / recepient_idx are the agent indices (the
    spelling of `recepient_idx` is kept — it is part of the public
    attribute interface); `data` is an optional payload.
    """

    def __init__(self, sender_idx=None, recepient_idx=None, data=None):
        self.sender_idx = sender_idx
        self.recepient_idx = recepient_idx
        self.data = data

    def __repr__(self):
        # BUG FIX: the original interpolated `type(self)` (which renders as
        # "<class '…'>") and left a stray '#' in the text; use the bare class
        # name so subclass reprs read cleanly too.
        return f"<{type(self).__name__} ({self.sender_idx})->({self.recepient_idx})>"
class HelloMessage(Message):
    """Message subtype distinguished only by its class; carries no extra fields."""
    pass
class StartCorrectionMessage(Message):
    """Marker message — presumably signals the start of a correction phase; confirm with senders."""
    pass
class StopCorrectionMessage(Message):
    """Marker message — presumably signals the end of a correction phase; confirm with senders."""
    pass
class AskEnergyMessage(Message):
    """Marker message — presumably a request for an energy reading; confirm with senders."""
    pass
class RespondEnergyMessage(Message):
    """Marker message — presumably the reply to AskEnergyMessage; confirm with senders."""
    pass
| en | 0.839578 | Two-way channel between agents. Connection between two agents: two channels for parent and child. Connection that remembers from-to identifiers #{type(self)} ({self.sender_idx})->({self.recepient_idx})>" # = [{self.data}]>" | 3.289376 | 3 |
pympts/psi.py | collinforsyth/pympts | 0 | 6616635 | # -*- coding: utf-8 -*-
from collections import namedtuple
PatTable = namedtuple("PatTable", "program_num program_map_pid")
class Psi:
    """
    Base class for different PSI tables. Given a PSI packet payload
    this will parse out the table header and syntax section.

    Field layout follows the MPEG-TS (ISO/IEC 13818-1) section syntax;
    data[0] is the pointer_field preceding the table.
    """

    def __init__(self, data):
        # Pointer field/filler bytes can be dropped(?)
        if data[0] != 0:
            # When the pointer field is non-zero, this is the pointer field
            # number of alignment padding bytes set to 0xFF or the end of
            # the previous table section spanning across TS packets
            # TODO: Add this
            pass
        self.table_id = data[1]
        self.section_syntax_ind = data[2] >> 7
        # section_length is a 12-bit field: low nibble of byte 2 plus byte 3.
        # (Mask widened from 0x3 to the spec's 0x0F; conformant sections keep
        # the top two bits zero, but be robust anyway.)
        self.section_length = ((data[2] & 0x0F) << 8) | data[3]
        # BUG FIX: the two bytes must be OR-ed together; the original used
        # `&`, which always produced 0 (the shifted byte has a zero low byte).
        self.table_id_ext = (data[4] << 8) | data[5]
        # BUG FIX: version_number is the 5 bits above the current/next flag;
        # the original masked with 0x180 after the shift, which is always 0
        # for a byte-sized input.
        self.version_num = (data[6] >> 1) & 0x1F
        self.current_next_indicator = data[6] & 0x1
        self.section_number = data[7]
        self.last_section_number = data[8]
class Pat(Psi):
    """Program Association Table: maps program numbers to their PMT PIDs."""

    def __init__(self, data):
        super().__init__(data)
        self.programs = []
        # Each program entry is 4 bytes, starting at offset 9 (pointer byte +
        # 3 header bytes + 5 syntax-section bytes).  The program loop spans
        # section_length minus those 5 syntax bytes and the 4-byte CRC.
        # BUG FIX: the original iterated `last_section_number + 1` times and
        # advanced only one byte per entry, reading overlapping bytes.
        num_programs = max(0, (self.section_length - 9) // 4)
        for i in range(num_programs):
            off = 9 + 4 * i
            p = PatTable(
                program_num=(data[off] << 8) | data[off + 1],
                program_map_pid=((data[off + 2] & 0x1F) << 8) | data[off + 3],
            )
            self.programs.append(p)

    def __str__(self):
        # BUG FIX: close the bracket and separate entries (the original
        # concatenated entries with no delimiter and left "[" unbalanced).
        return "Pat(programs: [{}])".format(
            ", ".join(
                "num:{}, pid:{}".format(x.program_num, x.program_map_pid)
                for x in self.programs
            )
        )
class Pmt(Psi):
    """Program Map Table: PCR PID, program descriptors and elementary streams."""

    def __init__(self, data):
        super().__init__(data)
        # 13-bit PCR PID: low 5 bits of byte 9 plus byte 10.
        self.pcr_pid = ((data[9] & 0x1F) << 8) | data[10]
        # BUG FIX: program_info_length is a 12-bit field spanning two bytes;
        # the original OR-ed the bytes without shifting the high byte left.
        program_info_len = ((data[11] & 0x0F) << 8) | data[12]
        # Need to confirm that multiple sections are used for
        # multiple program streams
        self.pmt_descriptors = []
        self.elementary_streams = []
        count = 13
        end_bytes = count + program_info_len
        # Program-level descriptors precede the elementary-stream loop.
        # `<` (instead of `!=`) avoids an infinite loop on malformed input.
        while count < end_bytes:
            pmt_desc = _Descriptor(data[count:])
            self.pmt_descriptors.append(pmt_desc)
            count += len(pmt_desc)
        # Elementary-stream entries run to the end of the section payload.
        # NOTE(review): with the pointer byte at data[0], index
        # `self.section_length` lines up with the start of the CRC; confirm
        # against sections carrying a non-zero pointer field.
        while count < self.section_length:
            es = _EsStream(data[count:])
            self.elementary_streams.append(es)
            count += len(es)

    def __str__(self):
        return "Pmt(pcr_pid={},descriptors={},elementary_streams={})".format(
            self.pcr_pid,
            "[{}]".format(",".join(str(x) for x in self.pmt_descriptors)),
            "[{}]".format(",".join(str(x) for x in self.elementary_streams)),
        )
class _EsStream:
    """One elementary-stream entry inside a PMT section."""

    def __init__(self, data):
        self.stream_type = data[0]
        # 13-bit elementary PID: low 5 bits of byte 1 plus byte 2.
        self.elementary_pid = ((data[1] & 0x1F) << 8) | data[2]
        self.pmt_descriptors = []
        # ES_info_length is a 12-bit field; mask widened from 0x3 to the
        # spec's 0x0F for robustness against non-conformant input.
        es_info_length = ((data[3] & 0x0F) << 8) | data[4]
        count = 5
        exit_cond = count + es_info_length
        # `<` (instead of `!=`) avoids an infinite loop on malformed input.
        while count < exit_cond:
            pmt_desc = _Descriptor(data[count:])
            count += len(pmt_desc)
            self.pmt_descriptors.append(pmt_desc)
        self._bytes = count

    def __len__(self):
        # Total size of the entry (5 fixed bytes + descriptor bytes).
        return self._bytes

    def __str__(self):
        return "EsStream(stream_type={},elementary_pid={},pmt_descriptors={})".format(
            self.stream_type,
            self.elementary_pid,
            "[{}]".format(",".join(str(x) for x in self.pmt_descriptors)),
        )
class _Descriptor:
def __init__(self, data):
self.tag = data[0]
length = data[1]
self.data = data[2 : length + 3]
self._bytes = 2 + length
def __len__(self):
return self._bytes
def __str__(self):
return "Descriptor(tag={}, data={})".format(self.tag, self.data)
| # -*- coding: utf-8 -*-
from collections import namedtuple
PatTable = namedtuple("PatTable", "program_num program_map_pid")
class Psi:
    """
    Base class for different PSI tables. Given a PSI packet payload
    this will parse out the table header and syntax section.

    Field layout follows the MPEG-TS (ISO/IEC 13818-1) section syntax;
    data[0] is the pointer_field preceding the table.
    """

    def __init__(self, data):
        # Pointer field/filler bytes can be dropped(?)
        if data[0] != 0:
            # When the pointer field is non-zero, this is the pointer field
            # number of alignment padding bytes set to 0xFF or the end of
            # the previous table section spanning across TS packets
            # TODO: Add this
            pass
        self.table_id = data[1]
        self.section_syntax_ind = data[2] >> 7
        # section_length is a 12-bit field: low nibble of byte 2 plus byte 3.
        # (Mask widened from 0x3 to the spec's 0x0F; conformant sections keep
        # the top two bits zero, but be robust anyway.)
        self.section_length = ((data[2] & 0x0F) << 8) | data[3]
        # BUG FIX: the two bytes must be OR-ed together; the original used
        # `&`, which always produced 0 (the shifted byte has a zero low byte).
        self.table_id_ext = (data[4] << 8) | data[5]
        # BUG FIX: version_number is the 5 bits above the current/next flag;
        # the original masked with 0x180 after the shift, which is always 0
        # for a byte-sized input.
        self.version_num = (data[6] >> 1) & 0x1F
        self.current_next_indicator = data[6] & 0x1
        self.section_number = data[7]
        self.last_section_number = data[8]
class Pat(Psi):
    """Program Association Table: maps program numbers to their PMT PIDs."""

    def __init__(self, data):
        super().__init__(data)
        self.programs = []
        # Each program entry is 4 bytes, starting at offset 9 (pointer byte +
        # 3 header bytes + 5 syntax-section bytes).  The program loop spans
        # section_length minus those 5 syntax bytes and the 4-byte CRC.
        # BUG FIX: the original iterated `last_section_number + 1` times and
        # advanced only one byte per entry, reading overlapping bytes.
        num_programs = max(0, (self.section_length - 9) // 4)
        for i in range(num_programs):
            off = 9 + 4 * i
            p = PatTable(
                program_num=(data[off] << 8) | data[off + 1],
                program_map_pid=((data[off + 2] & 0x1F) << 8) | data[off + 3],
            )
            self.programs.append(p)

    def __str__(self):
        # BUG FIX: close the bracket and separate entries (the original
        # concatenated entries with no delimiter and left "[" unbalanced).
        return "Pat(programs: [{}])".format(
            ", ".join(
                "num:{}, pid:{}".format(x.program_num, x.program_map_pid)
                for x in self.programs
            )
        )
class Pmt(Psi):
    """Program Map Table: PCR PID, program descriptors and elementary streams."""

    def __init__(self, data):
        super().__init__(data)
        # 13-bit PCR PID: low 5 bits of byte 9 plus byte 10.
        self.pcr_pid = ((data[9] & 0x1F) << 8) | data[10]
        # BUG FIX: program_info_length is a 12-bit field spanning two bytes;
        # the original OR-ed the bytes without shifting the high byte left.
        program_info_len = ((data[11] & 0x0F) << 8) | data[12]
        # Need to confirm that multiple sections are used for
        # multiple program streams
        self.pmt_descriptors = []
        self.elementary_streams = []
        count = 13
        end_bytes = count + program_info_len
        # Program-level descriptors precede the elementary-stream loop.
        # `<` (instead of `!=`) avoids an infinite loop on malformed input.
        while count < end_bytes:
            pmt_desc = _Descriptor(data[count:])
            self.pmt_descriptors.append(pmt_desc)
            count += len(pmt_desc)
        # Elementary-stream entries run to the end of the section payload.
        # NOTE(review): with the pointer byte at data[0], index
        # `self.section_length` lines up with the start of the CRC; confirm
        # against sections carrying a non-zero pointer field.
        while count < self.section_length:
            es = _EsStream(data[count:])
            self.elementary_streams.append(es)
            count += len(es)

    def __str__(self):
        return "Pmt(pcr_pid={},descriptors={},elementary_streams={})".format(
            self.pcr_pid,
            "[{}]".format(",".join(str(x) for x in self.pmt_descriptors)),
            "[{}]".format(",".join(str(x) for x in self.elementary_streams)),
        )
class _EsStream:
    """One elementary-stream entry inside a PMT section."""

    def __init__(self, data):
        self.stream_type = data[0]
        # 13-bit elementary PID: low 5 bits of byte 1 plus byte 2.
        self.elementary_pid = ((data[1] & 0x1F) << 8) | data[2]
        self.pmt_descriptors = []
        # ES_info_length is a 12-bit field; mask widened from 0x3 to the
        # spec's 0x0F for robustness against non-conformant input.
        es_info_length = ((data[3] & 0x0F) << 8) | data[4]
        count = 5
        exit_cond = count + es_info_length
        # `<` (instead of `!=`) avoids an infinite loop on malformed input.
        while count < exit_cond:
            pmt_desc = _Descriptor(data[count:])
            count += len(pmt_desc)
            self.pmt_descriptors.append(pmt_desc)
        self._bytes = count

    def __len__(self):
        # Total size of the entry (5 fixed bytes + descriptor bytes).
        return self._bytes

    def __str__(self):
        return "EsStream(stream_type={},elementary_pid={},pmt_descriptors={})".format(
            self.stream_type,
            self.elementary_pid,
            "[{}]".format(",".join(str(x) for x in self.pmt_descriptors)),
        )
class _Descriptor:
def __init__(self, data):
self.tag = data[0]
length = data[1]
self.data = data[2 : length + 3]
self._bytes = 2 + length
def __len__(self):
return self._bytes
def __str__(self):
return "Descriptor(tag={}, data={})".format(self.tag, self.data)
| en | 0.744247 | # -*- coding: utf-8 -*- Base class for different PSI tables. Given a PSI packet payload this will parse out the table header and syntax section # Pointer field/filler bytes can be dropped(?) # When the pointer field is non-zero, this is the pointer field # number of alignment padding bytes set to 0xFF or the end of # the previous table section spanning across TS packets # TODO: Add this # Need to confirm that multiple sections are used for # multiple program streams # Now read sections until the end | 2.741336 | 3 |
setup.py | Sreejoy/CrawlerFriend | 2 | 6616636 | <reponame>Sreejoy/CrawlerFriend<filename>setup.py
import setuptools
# Package metadata for CrawlerFriend.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="CrawlerFriend",
    version="1.0.11",
    author="<NAME>",
    author_email="<EMAIL>",
    description="A light weight crawler which gives search results in HTML form or in Dictionary form,"
    " given urls and keywords.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Sreejoy/CrawlerFriend",
    packages=setuptools.find_packages(),
    install_requires=[
        'requests',
        'beautifulsoup4'
    ],
    # BUG FIX: modern setuptools requires `classifiers` to be a list,
    # not a tuple.
    classifiers=[
        "Programming Language :: Python :: 2.7",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)

import setuptools
# Package metadata for CrawlerFriend.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="CrawlerFriend",
    version="1.0.11",
    author="<NAME>",
    author_email="<EMAIL>",
    description="A light weight crawler which gives search results in HTML form or in Dictionary form,"
    " given urls and keywords.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Sreejoy/CrawlerFriend",
    packages=setuptools.find_packages(),
    install_requires=[
        'requests',
        'beautifulsoup4'
    ],
    # BUG FIX: modern setuptools requires `classifiers` to be a list,
    # not a tuple.
    classifiers=[
        "Programming Language :: Python :: 2.7",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
_GTW/__test__/Entity_Reload.py | Tapyr/tapyr | 6 | 6616637 | <reponame>Tapyr/tapyr
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2014 Mag. <NAME> All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. <EMAIL>
# ****************************************************************************
# This module is part of the package GTW.__test__.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# GTW.__test__.Entity_Reload
#
# Purpose
# Test entity reloading after a scope.commit and changes in a different
# process
#
# Revision Dates
# 15-Jun-2012 (MG) Creation
# 27-Jun-2012 (CT) Test `b1.name` before querying for `b3`
# ««revision-date»»···
#--
# Doctest template; %(p1)s / %(n1)s (backend parameters) are substituted by
# Scaffold.create_test_dict below.  This is a runtime string — do not edit
# its contents without re-running the doctests.
_test_code = """
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> SRM = scope.SRM
>>> opti = SRM.Boat_Class (name = "Optimist", max_crew = 1)
>>> laser = SRM.Boat_Class (name = "Laser", max_crew = 1)
>>> b1 = SRM.Boat (u'Optimist', u"1107", u"AUT", raw = True) ### 1
>>> b2 = SRM.Boat (u"Laser", "42", None, "OE", raw = True)
>>> laser.max_crew ### before commit
1
>>> prepr (b1.name) ### before commit
''
>>> scope.commit ()
>>> modify_scope (%(p1)s, %(n1)s)
>>> laser.max_crew ### after change
2
>>> prepr (b1.name) ### after change
'My Boat'
>>> b3 = scope.SRM.Boat.query (nation = u"AUT").one ()
>>> b3 is b1
True
"""
from _GTW.__test__.model import *
from multiprocessing import Process
_Ancestor_Essence = GTW.OMP.SRM.Boat
Scaffold.Backend_Parameters ["SQL"] = "'sqlite:///test.sqlite'"
Scaffold.Backend_Parameters ["sq"] = "'sqlite:///test.sqlite'"
def _modify_scope (* args) :
scope = Scaffold.scope (* args, create = False, verbose = False)
laser = scope.SRM.Boat_Class.query (name = u"laser").one ()
laser.max_crew = 2
boat = scope.SRM.Boat.query (nation = u"AUT").one ()
boat.name = "My Boat"
scope.commit ()
scope.destroy ()
# end def _modify_scope
def modify_scope (* args) :
if 1 :
p = Process (target = _modify_scope, args = args)
p.start ()
p.join ()
else :
_modify_scope (* args)
# end def modify_scope
__test__ = Scaffold.create_test_dict (_test_code, ignore = "HPS")
### __END__ GTW.__test__.Entity_Reload
| # -*- coding: utf-8 -*-
# Copyright (C) 2010-2014 Mag. <NAME> All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. <EMAIL>
# ****************************************************************************
# This module is part of the package GTW.__test__.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# GTW.__test__.Entity_Reload
#
# Purpose
# Test entity reloading after a scope.commit and changes in a different
# process
#
# Revision Dates
# 15-Jun-2012 (MG) Creation
# 27-Jun-2012 (CT) Test `b1.name` before querying for `b3`
# ««revision-date»»···
#--
_test_code = """
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> SRM = scope.SRM
>>> opti = SRM.Boat_Class (name = "Optimist", max_crew = 1)
>>> laser = SRM.Boat_Class (name = "Laser", max_crew = 1)
>>> b1 = SRM.Boat (u'Optimist', u"1107", u"AUT", raw = True) ### 1
>>> b2 = SRM.Boat (u"Laser", "42", None, "OE", raw = True)
>>> laser.max_crew ### before commit
1
>>> prepr (b1.name) ### before commit
''
>>> scope.commit ()
>>> modify_scope (%(p1)s, %(n1)s)
>>> laser.max_crew ### after change
2
>>> prepr (b1.name) ### after change
'My Boat'
>>> b3 = scope.SRM.Boat.query (nation = u"AUT").one ()
>>> b3 is b1
True
"""
from _GTW.__test__.model import *
from multiprocessing import Process
_Ancestor_Essence = GTW.OMP.SRM.Boat
Scaffold.Backend_Parameters ["SQL"] = "'sqlite:///test.sqlite'"
Scaffold.Backend_Parameters ["sq"] = "'sqlite:///test.sqlite'"
def _modify_scope (* args) :
scope = Scaffold.scope (* args, create = False, verbose = False)
laser = scope.SRM.Boat_Class.query (name = u"laser").one ()
laser.max_crew = 2
boat = scope.SRM.Boat.query (nation = u"AUT").one ()
boat.name = "My Boat"
scope.commit ()
scope.destroy ()
# end def _modify_scope
def modify_scope (* args) :
if 1 :
p = Process (target = _modify_scope, args = args)
p.start ()
p.join ()
else :
_modify_scope (* args)
# end def modify_scope
__test__ = Scaffold.create_test_dict (_test_code, ignore = "HPS")
### __END__ GTW.__test__.Entity_Reload | en | 0.481325 | # -*- coding: utf-8 -*- # Copyright (C) 2010-2014 Mag. <NAME> All rights reserved # Glasauergasse 32, A--1130 Wien, Austria. <EMAIL> # **************************************************************************** # This module is part of the package GTW.__test__. # # This module is licensed under the terms of the BSD 3-Clause License # <http://www.c-tanzer.at/license/bsd_3c.html>. # **************************************************************************** # #++ # Name # GTW.__test__.Entity_Reload # # Purpose # Test entity reloading after a scope.commit and changes in a different # process # # Revision Dates # 15-Jun-2012 (MG) Creation # 27-Jun-2012 (CT) Test `b1.name` before querying for `b3` # ««revision-date»»··· #-- >>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS Creating new scope MOMT__... >>> SRM = scope.SRM >>> opti = SRM.Boat_Class (name = "Optimist", max_crew = 1) >>> laser = SRM.Boat_Class (name = "Laser", max_crew = 1) >>> b1 = SRM.Boat (u'Optimist', u"1107", u"AUT", raw = True) ### 1 >>> b2 = SRM.Boat (u"Laser", "42", None, "OE", raw = True) >>> laser.max_crew ### before commit 1 >>> prepr (b1.name) ### before commit '' >>> scope.commit () >>> modify_scope (%(p1)s, %(n1)s) >>> laser.max_crew ### after change 2 >>> prepr (b1.name) ### after change 'My Boat' >>> b3 = scope.SRM.Boat.query (nation = u"AUT").one () >>> b3 is b1 True # end def _modify_scope # end def modify_scope ### __END__ GTW.__test__.Entity_Reload | 1.964612 | 2 |
paystack/views.py | mymi14s/django-paystack | 3 | 6616638 | import json, hmac, hashlib
from django.shortcuts import redirect, reverse
from django.http import JsonResponse
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.http import HttpResponse
from django.views.generic import RedirectView, TemplateView
# Create your views here.
from . import settings, signals, utils
from .signals import payment_verified
from .utils import load_lib
from .models import PaymentHistory
def verify_payment(request, order):
amount = request.GET.get('amount')
txrf = request.GET.get('trxref')
PaystackAPI = load_lib()
paystack_instance = PaystackAPI()
response = paystack_instance.verify_payment(txrf, amount=int(amount))
if response[0]:
payment_verified.send(
sender=PaystackAPI,
ref=txrf,
amount=int(amount) / 100,
order=order)
return redirect(
reverse('paystack:successful_verification', args=[order]))
return redirect(reverse('paystack:failed_verification', args=[order]))
class FailedView(RedirectView):
permanent = True
def get_redirect_url(self, *args, **kwargs):
if settings.PAYSTACK_FAILED_URL == 'paystack:failed_page':
return reverse(settings.PAYSTACK_FAILED_URL)
return settings.PAYSTACK_FAILED_URL
def success_redirect_view(request, order_id):
url = settings.PAYSTACK_SUCCESS_URL
if url == 'paystack:success_page':
url = reverse(url)
return redirect(url, permanent=True)
def failure_redirect_view(request, order_id):
url = settings.PAYSTACK_FAILED_URL
if url == 'paystack:failed_page':
url = reverse(url)
return redirect(url, permanent=True)
class SuccessView(RedirectView):
permanent = True
def get_redirect_url(self, *args, **kwargs):
if not settings.PAYSTACK_SUCCESS_URL:
return reverse('paystack:success_page')
return settings.PAYSTACK_SUCCESS_URL
# def webhook_view(request):
# # ensure that all parameters are in the bytes representation
# PaystackAPI = load_lib()
# paystack_instance = PaystackAPI()
# signature = request.META['HTTP_X_PAYSTACK_SIGNATURE']
# paystack_instance.webhook_api.verify(
# signature, request.body, full_auth=True)
# # digest = utils.generate_digest(request.body)
# # if digest == signature:
# # payload = json.loads(request.body)
# # signals.event_signal.send(
# # sender=request, event=payload['event'], data=payload['data'])
# return JsonResponse({'status': "Success"})
# p_event, p_payment_date, p_reference, p_email, p_json_body = None, None, None, None, None
def payment_state():
""" Keep patment information state. """
p_event = None
p_payment_date = None
p_reference = None
p_email = None
p_json_body = None
def update_payment(json_body):
"""Update payment status based on data from json_body."""
event = json_body['event']
ndata = json_body['data']
reference = ndata['reference']
email = ndata['customer']['email']
payment_date = ndata['paid_at']
if event == 'charge.success':
# status = 'paid'
# payment_date = ndata['paid_at']
# # payment_user = model.objects.filter(
# reference=reference).update(
# status=status, payment_date=payment_date
# )
PaymentHistory.objects.create(
email=email,
reference=reference, data=json_body
)
payment_state.p_event = event
payment_state.p_payment_date = payment_date
payment_state.p_reference = reference
payment_state.p_email = email
payment_state.p_json_body = json_body
else:
# status = str(event.replace('charge.', ''))
# payment_date = ndata['paid_at']
# # payment_user = model.objects.filter(
# reference=reference).update(
# status=status, payment_date=payment_date
# )
PaymentHistory.objects.create(
email=email,
reference=reference, data=json_body
)
payment_state.p_event = event
payment_state.p_payment_date = payment_date
payment_state.p_reference = reference
payment_state.p_email = email
payment_state.p_json_body = json_body
@require_POST
@csrf_exempt
def webhook_endpoint(request):
"""
The function takes an http request object
containing the json data from paystack webhook client.
Django's http request and response object was used
for this example.
"""
paystack_sk = settings.PAYSTACK_SECRET_KEY# "sk_fromthepaystackguys"
json_body = json.loads(request.body)
computed_hmac = hmac.new(
bytes(paystack_sk, 'utf-8'),
str.encode(request.body.decode('utf-8')),
digestmod=hashlib.sha512
).hexdigest()
if 'HTTP_X_PAYSTACK_SIGNATURE' in request.META:
if request.META['HTTP_X_PAYSTACK_SIGNATURE'] == computed_hmac:
# print(request.META)
update_payment(json_body) #
payload = json_body
signals.event_signal.send(
sender=request, event=payload['event'], data=payload['data'])
return HttpResponse(status=200)
# Not successful
update_payment(json_body)
payload = json_body
signals.event_signal.send(
sender=request, event=payload['event'], data=payload['data'])
# print('failed\n', json_body)
return HttpResponse(status=400) #non 200
# event, payment_date, reference, json_body = update_payment
# print(event, payment_date, reference, json_body)
| import json, hmac, hashlib
from django.shortcuts import redirect, reverse
from django.http import JsonResponse
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.http import HttpResponse
from django.views.generic import RedirectView, TemplateView
# Create your views here.
from . import settings, signals, utils
from .signals import payment_verified
from .utils import load_lib
from .models import PaymentHistory
def verify_payment(request, order):
amount = request.GET.get('amount')
txrf = request.GET.get('trxref')
PaystackAPI = load_lib()
paystack_instance = PaystackAPI()
response = paystack_instance.verify_payment(txrf, amount=int(amount))
if response[0]:
payment_verified.send(
sender=PaystackAPI,
ref=txrf,
amount=int(amount) / 100,
order=order)
return redirect(
reverse('paystack:successful_verification', args=[order]))
return redirect(reverse('paystack:failed_verification', args=[order]))
class FailedView(RedirectView):
permanent = True
def get_redirect_url(self, *args, **kwargs):
if settings.PAYSTACK_FAILED_URL == 'paystack:failed_page':
return reverse(settings.PAYSTACK_FAILED_URL)
return settings.PAYSTACK_FAILED_URL
def success_redirect_view(request, order_id):
url = settings.PAYSTACK_SUCCESS_URL
if url == 'paystack:success_page':
url = reverse(url)
return redirect(url, permanent=True)
def failure_redirect_view(request, order_id):
url = settings.PAYSTACK_FAILED_URL
if url == 'paystack:failed_page':
url = reverse(url)
return redirect(url, permanent=True)
class SuccessView(RedirectView):
permanent = True
def get_redirect_url(self, *args, **kwargs):
if not settings.PAYSTACK_SUCCESS_URL:
return reverse('paystack:success_page')
return settings.PAYSTACK_SUCCESS_URL
# def webhook_view(request):
# # ensure that all parameters are in the bytes representation
# PaystackAPI = load_lib()
# paystack_instance = PaystackAPI()
# signature = request.META['HTTP_X_PAYSTACK_SIGNATURE']
# paystack_instance.webhook_api.verify(
# signature, request.body, full_auth=True)
# # digest = utils.generate_digest(request.body)
# # if digest == signature:
# # payload = json.loads(request.body)
# # signals.event_signal.send(
# # sender=request, event=payload['event'], data=payload['data'])
# return JsonResponse({'status': "Success"})
# p_event, p_payment_date, p_reference, p_email, p_json_body = None, None, None, None, None
def payment_state():
""" Keep patment information state. """
p_event = None
p_payment_date = None
p_reference = None
p_email = None
p_json_body = None
def update_payment(json_body):
"""Update payment status based on data from json_body."""
event = json_body['event']
ndata = json_body['data']
reference = ndata['reference']
email = ndata['customer']['email']
payment_date = ndata['paid_at']
if event == 'charge.success':
# status = 'paid'
# payment_date = ndata['paid_at']
# # payment_user = model.objects.filter(
# reference=reference).update(
# status=status, payment_date=payment_date
# )
PaymentHistory.objects.create(
email=email,
reference=reference, data=json_body
)
payment_state.p_event = event
payment_state.p_payment_date = payment_date
payment_state.p_reference = reference
payment_state.p_email = email
payment_state.p_json_body = json_body
else:
# status = str(event.replace('charge.', ''))
# payment_date = ndata['paid_at']
# # payment_user = model.objects.filter(
# reference=reference).update(
# status=status, payment_date=payment_date
# )
PaymentHistory.objects.create(
email=email,
reference=reference, data=json_body
)
payment_state.p_event = event
payment_state.p_payment_date = payment_date
payment_state.p_reference = reference
payment_state.p_email = email
payment_state.p_json_body = json_body
@require_POST
@csrf_exempt
def webhook_endpoint(request):
"""
The function takes an http request object
containing the json data from paystack webhook client.
Django's http request and response object was used
for this example.
"""
paystack_sk = settings.PAYSTACK_SECRET_KEY# "sk_fromthepaystackguys"
json_body = json.loads(request.body)
computed_hmac = hmac.new(
bytes(paystack_sk, 'utf-8'),
str.encode(request.body.decode('utf-8')),
digestmod=hashlib.sha512
).hexdigest()
if 'HTTP_X_PAYSTACK_SIGNATURE' in request.META:
if request.META['HTTP_X_PAYSTACK_SIGNATURE'] == computed_hmac:
# print(request.META)
update_payment(json_body) #
payload = json_body
signals.event_signal.send(
sender=request, event=payload['event'], data=payload['data'])
return HttpResponse(status=200)
# Not successful
update_payment(json_body)
payload = json_body
signals.event_signal.send(
sender=request, event=payload['event'], data=payload['data'])
# print('failed\n', json_body)
return HttpResponse(status=400) #non 200
# event, payment_date, reference, json_body = update_payment
# print(event, payment_date, reference, json_body)
| en | 0.569779 | # Create your views here. # def webhook_view(request): # # ensure that all parameters are in the bytes representation # PaystackAPI = load_lib() # paystack_instance = PaystackAPI() # signature = request.META['HTTP_X_PAYSTACK_SIGNATURE'] # paystack_instance.webhook_api.verify( # signature, request.body, full_auth=True) # # digest = utils.generate_digest(request.body) # # if digest == signature: # # payload = json.loads(request.body) # # signals.event_signal.send( # # sender=request, event=payload['event'], data=payload['data']) # return JsonResponse({'status': "Success"}) # p_event, p_payment_date, p_reference, p_email, p_json_body = None, None, None, None, None Keep patment information state. Update payment status based on data from json_body. # status = 'paid' # payment_date = ndata['paid_at'] # # payment_user = model.objects.filter( # reference=reference).update( # status=status, payment_date=payment_date # ) # status = str(event.replace('charge.', '')) # payment_date = ndata['paid_at'] # # payment_user = model.objects.filter( # reference=reference).update( # status=status, payment_date=payment_date # ) The function takes an http request object containing the json data from paystack webhook client. Django's http request and response object was used for this example. # "sk_fromthepaystackguys" # print(request.META) # # Not successful # print('failed\n', json_body) #non 200 # event, payment_date, reference, json_body = update_payment # print(event, payment_date, reference, json_body) | 2.08772 | 2 |
torcms/handlers/collect_handler.py | seaioheroes/TorCMS-master | 0 | 6616639 | <filename>torcms/handlers/collect_handler.py
# -*- coding:utf-8 -*-
'''
For User collection
'''
import json
import tornado.web
from torcms.core.base_handler import BaseHandler
from torcms.core import tools
from torcms.model.collect_model import MCollect
from torcms.core.tools import logger
from config import CMS_CFG
class CollectHandler(BaseHandler):
'''
For User collection
'''
def initialize(self, **kwargs):
super(CollectHandler, self).initialize()
def get(self, *args, **kwargs):
url_str = args[0]
if url_str:
url_arr = self.parse_url(url_str)
else:
return False
if url_str == 'list':
self.show_list(url_str)
elif len(url_arr) == 2:
self.show_list(url_arr[0], url_arr[1])
elif len(url_arr) == 1 and (len(url_str) == 4 or len(url_str) == 5):
if self.get_current_user():
self.add_or_update(url_str)
else:
self.set_status(403)
return False
@tornado.web.authenticated
def add_or_update(self, app_id):
'''
Add or update the category.
'''
logger.info('Collect info: user-{0}, uid-{1}'.format(self.userinfo.uid, app_id))
MCollect.add_or_update(self.userinfo.uid, app_id)
out_dic = {'success': True}
return json.dump(out_dic, self)
@tornado.web.authenticated
def show_list(self, the_list, cur_p=''):
'''
List of the user collections.
'''
current_page_num = int(cur_p) if cur_p else 1
current_page_num = 1 if current_page_num < 1 else current_page_num
num_of_cat = MCollect.count_of_user(self.userinfo.uid)
page_num = int(num_of_cat / CMS_CFG['list_num']) + 1
kwd = {'current_page': current_page_num}
self.render('misc/collect/list.html',
recs_collect=MCollect.query_pager_by_all(self.userinfo.uid,
current_page_num).naive(),
pager=tools.gen_pager_purecss('/collect/{0}'.format(the_list),
page_num,
current_page_num),
userinfo=self.userinfo,
cfg=CMS_CFG,
kwd=kwd)
| <filename>torcms/handlers/collect_handler.py
# -*- coding:utf-8 -*-
'''
For User collection
'''
import json
import tornado.web
from torcms.core.base_handler import BaseHandler
from torcms.core import tools
from torcms.model.collect_model import MCollect
from torcms.core.tools import logger
from config import CMS_CFG
class CollectHandler(BaseHandler):
'''
For User collection
'''
def initialize(self, **kwargs):
super(CollectHandler, self).initialize()
def get(self, *args, **kwargs):
url_str = args[0]
if url_str:
url_arr = self.parse_url(url_str)
else:
return False
if url_str == 'list':
self.show_list(url_str)
elif len(url_arr) == 2:
self.show_list(url_arr[0], url_arr[1])
elif len(url_arr) == 1 and (len(url_str) == 4 or len(url_str) == 5):
if self.get_current_user():
self.add_or_update(url_str)
else:
self.set_status(403)
return False
@tornado.web.authenticated
def add_or_update(self, app_id):
'''
Add or update the category.
'''
logger.info('Collect info: user-{0}, uid-{1}'.format(self.userinfo.uid, app_id))
MCollect.add_or_update(self.userinfo.uid, app_id)
out_dic = {'success': True}
return json.dump(out_dic, self)
@tornado.web.authenticated
def show_list(self, the_list, cur_p=''):
'''
List of the user collections.
'''
current_page_num = int(cur_p) if cur_p else 1
current_page_num = 1 if current_page_num < 1 else current_page_num
num_of_cat = MCollect.count_of_user(self.userinfo.uid)
page_num = int(num_of_cat / CMS_CFG['list_num']) + 1
kwd = {'current_page': current_page_num}
self.render('misc/collect/list.html',
recs_collect=MCollect.query_pager_by_all(self.userinfo.uid,
current_page_num).naive(),
pager=tools.gen_pager_purecss('/collect/{0}'.format(the_list),
page_num,
current_page_num),
userinfo=self.userinfo,
cfg=CMS_CFG,
kwd=kwd)
| en | 0.701925 | # -*- coding:utf-8 -*- For User collection For User collection Add or update the category. List of the user collections. | 2.019509 | 2 |
src/app.py | RodrigoSantosRodrigues/api-jogo-da-velha-python | 0 | 6616640 | # -*- coding: utf-8 -*-
# src/app.py
"""
API
------------------------------------------------------------------------
Create app
------------------------------------------------------------------------
"""
from flask import Flask, Response
from flask_cors import CORS
from flask_swagger_ui import get_swaggerui_blueprint
from .config import app_config
from .controllers.VelhaController import velha_api as velha_blueprint
def create_app(env_name):
"""
param: env_name
DOC API USING SWAGGER UI
Create app
"""
# app initiliazation
app = Flask(__name__)
#CORS(app, resources={r"/api/*": {"origins": "*"}})
CORS(app)
app.config.from_object(app_config[env_name])
### swagger specific ###
swagger_url = '/apidocs'
api_url = '/static/api/api.yml'
swagger_blueprint = get_swaggerui_blueprint(
swagger_url,
api_url,
config={
'app_name': "API Jogo da Velha",
'layout': "BaseLayout",
'filter': True
}
)
app.register_blueprint(swagger_blueprint, url_prefix=swagger_url)
### end swagger specific ###
app.register_blueprint(velha_blueprint, url_prefix='/v1/api/game')
@app.route('/', methods=['GET'])
def index():
"""
Home
"""
return Response(
mimetype="application/json",
response={"Bem vindo ao Jogo da Velha - Documentação: /apidocs"},
status=200
)
return app
| # -*- coding: utf-8 -*-
# src/app.py
"""
API
------------------------------------------------------------------------
Create app
------------------------------------------------------------------------
"""
from flask import Flask, Response
from flask_cors import CORS
from flask_swagger_ui import get_swaggerui_blueprint
from .config import app_config
from .controllers.VelhaController import velha_api as velha_blueprint
def create_app(env_name):
"""
param: env_name
DOC API USING SWAGGER UI
Create app
"""
# app initiliazation
app = Flask(__name__)
#CORS(app, resources={r"/api/*": {"origins": "*"}})
CORS(app)
app.config.from_object(app_config[env_name])
### swagger specific ###
swagger_url = '/apidocs'
api_url = '/static/api/api.yml'
swagger_blueprint = get_swaggerui_blueprint(
swagger_url,
api_url,
config={
'app_name': "API Jogo da Velha",
'layout': "BaseLayout",
'filter': True
}
)
app.register_blueprint(swagger_blueprint, url_prefix=swagger_url)
### end swagger specific ###
app.register_blueprint(velha_blueprint, url_prefix='/v1/api/game')
@app.route('/', methods=['GET'])
def index():
"""
Home
"""
return Response(
mimetype="application/json",
response={"Bem vindo ao Jogo da Velha - Documentação: /apidocs"},
status=200
)
return app
| en | 0.266276 | # -*- coding: utf-8 -*- # src/app.py API
------------------------------------------------------------------------
Create app
------------------------------------------------------------------------ param: env_name
DOC API USING SWAGGER UI
Create app # app initiliazation #CORS(app, resources={r"/api/*": {"origins": "*"}}) ### swagger specific ### ### end swagger specific ### Home | 2.394962 | 2 |
cpu_load.py | harti768/Watch-my-Pie | 2 | 6616641 | import psutil
import Pids
import Memory
import send_email
import os
from uuid import getnode as get_mac
mac = get_mac()
cpu_load_finish = 0
cpu_loadinglevel = False
while_loop = True
while(while_loop):
cpu_load_warning = 70 #70 #value for cpu warning message
cpu_interval = 6 #intval for cpu_percant_reequest
def cpu_load():
count = psutil.cpu_count()
condition_cpu_loop = True
mem = Memory.Free_Space()
if mem < 200:
print "Achtung, sehr wenig Speicher frei!"
while (condition_cpu_loop == True):
cpu_load = psutil.cpu_percent(interval=cpu_interval)
print(cpu_load)
cpu_load_finish = cpu_load
if(cpu_load > cpu_load_warning):
condition_cpu_loop = False
print("Warning Warning")
print Pids.Pi(count, cpu_load_finish)
return(cpu_load)
print(cpu_load())
#print("test1" + str(cpu_load_finish))
if (cpu_load >= 80) & (cpu_loadinglevel == False):
#print(cpu_load + " foo")
send_email.main("DANGER DANGER! Ihr Pi " + (str(mac)) + " ist zu 80% ausgelastet - sofern sie es nicht willentlich tun, untersuchen sie ihn auf Schadsoftware.", "Danger!")
#print("test2")
cpu_loadinglevel = True
if (cpu_load_finish >= 95) & (cpu_loadinglevel == True):
#print(cpu_load + " foo")
send_email.main("DANGER DANGER DANGER! Ihr Pi " + (str(mac)) + "ist bereits zu 95% ausgelastet, er wird sich bald abschalten! Sichern sie ihre Daten, sonst koennte es zu Datenverlusten kommen!", "Danger! Danger!")
print("test")
cpu_loadinglevel = False
while_loop = False
break
Print("Programm wird beendet...")
| import psutil
import Pids
import Memory
import send_email
import os
from uuid import getnode as get_mac
mac = get_mac()
cpu_load_finish = 0
cpu_loadinglevel = False
while_loop = True
while(while_loop):
cpu_load_warning = 70 #70 #value for cpu warning message
cpu_interval = 6 #intval for cpu_percant_reequest
def cpu_load():
count = psutil.cpu_count()
condition_cpu_loop = True
mem = Memory.Free_Space()
if mem < 200:
print "Achtung, sehr wenig Speicher frei!"
while (condition_cpu_loop == True):
cpu_load = psutil.cpu_percent(interval=cpu_interval)
print(cpu_load)
cpu_load_finish = cpu_load
if(cpu_load > cpu_load_warning):
condition_cpu_loop = False
print("Warning Warning")
print Pids.Pi(count, cpu_load_finish)
return(cpu_load)
print(cpu_load())
#print("test1" + str(cpu_load_finish))
if (cpu_load >= 80) & (cpu_loadinglevel == False):
#print(cpu_load + " foo")
send_email.main("DANGER DANGER! Ihr Pi " + (str(mac)) + " ist zu 80% ausgelastet - sofern sie es nicht willentlich tun, untersuchen sie ihn auf Schadsoftware.", "Danger!")
#print("test2")
cpu_loadinglevel = True
if (cpu_load_finish >= 95) & (cpu_loadinglevel == True):
#print(cpu_load + " foo")
send_email.main("DANGER DANGER DANGER! Ihr Pi " + (str(mac)) + "ist bereits zu 95% ausgelastet, er wird sich bald abschalten! Sichern sie ihre Daten, sonst koennte es zu Datenverlusten kommen!", "Danger! Danger!")
print("test")
cpu_loadinglevel = False
while_loop = False
break
Print("Programm wird beendet...")
| en | 0.203055 | #70 #value for cpu warning message #intval for cpu_percant_reequest #print("test1" + str(cpu_load_finish)) #print(cpu_load + " foo") #print("test2") #print(cpu_load + " foo") | 2.748286 | 3 |
closed/FuriosaAI/code/quantization/furiosa_sdk_quantizer/frontend/onnx/transformer/fuse_conv.py | ctuning/inference_results_v1.1 | 12 | 6616642 | <gh_stars>10-100
import abc
import onnx
from furiosa_sdk_quantizer.interfaces.transformer import Transformer
from furiosa_sdk_quantizer.frontend.onnx.transformer import ONNXTransformer
from furiosa_sdk_quantizer.frontend.onnx.quantizer.utils import attribute_to_kwargs
class FuseConv(Transformer):
def transform(self, model: onnx.ModelProto) -> onnx.ModelProto:
for transformer in [
Pattern_1,
Pattern_2,
Pattern_3,
]:
model = transformer(model).transform()
return model
class Pattern_1(ONNXTransformer, abc.ABC):
"""
transform
prev --> MatMul --> Add --> next
to
prev --> Unsqueeze --> Conv --> Squeeze --> next
if 1. MatMul.ndim == 2
2. MatMul must have at most one initializer
3. Add must have at most one initializer
"""
pattern_to_match = ["MatMul", "Add"]
def pattern_matching(self, base_node):
inputs = base_node.input
matched_nodes = self.pattern_matcher(base_node, self.pattern_to_match)
if not matched_nodes:
return inputs
if not self.pattern_condition_checker(matched_nodes):
return inputs
top_node = matched_nodes[0]
self.transform_to_fuse(
matched_nodes,
nodes_to_add=[*self.make_nodes(**self.get_new_node_args(matched_nodes))],
inits_to_add=[*self.make_initializers(**self.get_new_init_args(matched_nodes))],
vis_to_add=[*self.make_value_infos(**self.get_new_vi_args(matched_nodes))],
)
return top_node.input
def pattern_condition_checker(self, nodes_to_check):
top_node, base_node = nodes_to_check
if not self.check_condition_1(top_node.output[0]):
return False
if not self.check_condition_2(top_node):
return False
if not self.check_condition_2(base_node):
return False
def check_condition_1(self, tensor_name):
if len(self.get_value_info_shape(tensor_name)) == 2:
return True
return False
def check_condition_2(self, node):
num_init = 0
for node_input in node.input:
if node_input in self.initializer_map:
num_init += 1
if num_init == 1:
return True
return False
def get_new_vi_args(self, matched_nodes):
top_node = matched_nodes[0]
base_node = matched_nodes[-1]
fnode_input = self.get_data_node_input(top_node)
fnode_output = base_node.output[0]
return {"node_input": fnode_input, "node_output": fnode_output}
def get_new_init_args(self, matched_nodes):
top_node = matched_nodes[0]
base_node = matched_nodes[-1]
fw_input = self.get_init_node_input(top_node)
fb_input = self.get_init_node_input(base_node)
return {"w_input": fw_input, "b_input": fb_input}
def get_new_node_args(self, matched_nodes):
args = dict()
args.update(self.get_new_vi_args(matched_nodes))
args.update(self.get_new_init_args(matched_nodes))
return args
def make_nodes(self, node_input, node_output, w_input, b_input, **kwargs):
unsqueeze_node = self.make_node(
"Unsqueeze",
inputs=[node_input],
outputs=[node_input + "_unsqueezed"],
name=node_input + "_1",
**{"axes": [2, 3]}
)
conv_node = self.make_node(
"Conv",
inputs=[unsqueeze_node.output[0], w_input + "_fused", b_input + "_fused"],
outputs=[node_input + "_fused"],
name=node_input + "_2",
**{
"dilations": [1, 1],
"group": 1,
"kernel_shape": [1, 1],
"pads": [0, 0, 0, 0],
"strides": [1, 1],
}
)
squeeze_node = self.make_node(
"Squeeze",
inputs=[conv_node.output[0]],
outputs=[node_output],
name=node_input + "_3",
**{"axes": [2, 3]}
)
return unsqueeze_node, conv_node, squeeze_node
def make_initializers(self, w_input, b_input=None, **kwargs):
new_inits = []
w_arr = self.get_initializer_array(w_input)
new_w_arr = self.weight_transformation(w_arr, **kwargs)
new_w_init = self.make_initializer_from_array(new_w_arr, w_input + "_fused")
new_inits.append(new_w_init)
if b_input:
b_arr = self.get_initializer_array(b_input)
new_b_init = self.make_initializer_from_array(b_arr, b_input + "_fused")
new_inits.append(new_b_init)
return new_inits
def weight_transformation(self, w_arr, **kwargs):
c, n = w_arr.shape
new_w_arr = w_arr.transpose().reshape(n, c, 1, 1)
return new_w_arr
def make_value_infos(self, node_input, node_output):
conv_input_vi = self.make_tensor_value_info(
node_input + "_unsqueezed",
onnx.TensorProto.FLOAT,
self.get_value_info_shape(node_input) + [1, 1],
)
conv_output_vi = self.make_tensor_value_info(
node_input + "_fused",
onnx.TensorProto.FLOAT,
self.get_value_info_shape(node_output) + [1, 1],
)
return conv_input_vi, conv_output_vi
class Pattern_2(Pattern_1, abc.ABC):
"""
transform
prev --> Gemm --> next
to
prev --> Unsqueeze --> Conv --> Squeeze --> next
if 1. one of Gemm.A and Gemm.B must have initializer
2. Gemm.C must have initializer if defined
"""
pattern_to_match = ["Gemm"]
def pattern_condition_checker(self, nodes_to_check):
node = nodes_to_check[0]
if not self.check_condition_3(node):
return False
if not self.check_condition_4(node):
return False
return True
def check_condition_3(self, node):
num_init = 0
for idx, node_input in enumerate(node.input):
if idx == 2:
break
if node_input in self.initializer_map:
num_init += 1
if num_init == 1:
return True
return False
def check_condition_4(self, node):
if len(node.input) == 3:
if node.input[2] not in self.initializer_map:
return False
return True
def get_new_init_args(self, matched_nodes):
node = matched_nodes[0]
fw_input = node.input[1]
fb_input = None
if len(node.input) == 3:
fb_input = node.input[2]
args = {"w_input": fw_input, "b_input": fb_input}
args.update(self.get_attrs(node))
return args
def get_new_vi_args(self, matched_nodes):
node = matched_nodes[0]
fnode_input = node.input[0]
fnode_output = node.output[0]
return {"node_input": fnode_input, "node_output": fnode_output}
def weight_transformation(self, w_arr, **kwargs):
transB = kwargs["transB"]
if transB == 0:
w_arr = w_arr.transpose()
n, c = w_arr.shape
new_arr = w_arr.reshape(n, c, 1, 1)
return new_arr
def get_attrs(self, node):
from furiosa_sdk_quantizer.frontend.onnx.quantizer.utils import attribute_to_kwargs
attrs = attribute_to_kwargs(node.attribute)
alpha = attrs["alpha"]
beta = attrs["beta"]
assert alpha == beta == 1.0, "Assume alpha = beta = 1.0"
transB = attrs["transB"]
return {"transB": transB}
class Pattern_3(ONNXTransformer, abc.ABC):
    """
    transform
        prev --> Conv --> Add --> next
    to
        prev --> Conv --> next

    if len(Conv.input) == 2
    """

    pattern_to_match = ["Conv", "Add"]

    def pattern_matching(self, base_node):
        """Fuse a bias-less Conv followed by Add into a single Conv with bias."""
        node_inputs = base_node.input

        matched = self.pattern_matcher(base_node, self.pattern_to_match)
        if not matched:
            return node_inputs
        if not self.pattern_condition_checker(matched):
            return node_inputs

        conv_node, add_node = matched
        self.transform_to_fuse(
            matched,
            nodes_to_add=[self.make_nodes(conv_node, add_node)],
            inits_to_add=[self.make_initializers(add_node)],
        )
        return conv_node.input

    def pattern_condition_checker(self, nodes_to_check):
        """The pattern applies only when the Conv node carries no bias input."""
        conv_node = nodes_to_check[0]
        return self.check_condition_1(conv_node)

    def check_condition_1(self, node):
        """
        check that Conv has no bias input, i.e. only data and weight inputs
        """
        return len(node.input) == 2

    def make_nodes(self, top_node, base_node):
        """Build the fused Conv: the original Conv inputs plus the Add bias."""
        fused_bias_name = self.get_init_node_input(base_node) + "_fused"
        return self.make_node(
            "Conv",
            inputs=[*top_node.input, fused_bias_name],
            outputs=[base_node.output[0]],
            name=top_node.name,
            **attribute_to_kwargs(top_node.attribute)
        )

    def make_initializers(self, base_node):
        """Flatten the Add bias initializer into a 1-D Conv bias named "<bias>_fused"."""
        bias_name = self.get_init_node_input(base_node)
        bias_arr = self.get_initializer_array(bias_name)
        return self.make_initializer_from_array(bias_arr.flatten(), bias_name + "_fused")
import abc
import onnx
from furiosa_sdk_quantizer.interfaces.transformer import Transformer
from furiosa_sdk_quantizer.frontend.onnx.transformer import ONNXTransformer
from furiosa_sdk_quantizer.frontend.onnx.quantizer.utils import attribute_to_kwargs
class FuseConv(Transformer):
    """Apply the MatMul+Add, Gemm, and Conv+Add fusion patterns in sequence."""

    def transform(self, model: onnx.ModelProto) -> onnx.ModelProto:
        pattern_classes = (Pattern_1, Pattern_2, Pattern_3)
        for pattern_cls in pattern_classes:
            model = pattern_cls(model).transform()
        return model
class Pattern_1(ONNXTransformer, abc.ABC):
    """
    transform
        prev --> MatMul --> Add --> next
    to
        prev --> Unsqueeze --> Conv --> Squeeze --> next

    if 1. MatMul.ndim == 2
       2. MatMul must have at most one initializer
       3. Add must have at most one initializer
    """

    pattern_to_match = ["MatMul", "Add"]

    def pattern_matching(self, base_node):
        """Try to match and fuse the MatMul->Add pattern ending at base_node.

        Returns the inputs to continue graph traversal from: the top node's
        inputs when the pattern was fused, otherwise base_node's inputs.
        """
        inputs = base_node.input

        matched_nodes = self.pattern_matcher(base_node, self.pattern_to_match)
        if not matched_nodes:
            return inputs

        if not self.pattern_condition_checker(matched_nodes):
            return inputs

        top_node = matched_nodes[0]
        self.transform_to_fuse(
            matched_nodes,
            nodes_to_add=[*self.make_nodes(**self.get_new_node_args(matched_nodes))],
            inits_to_add=[*self.make_initializers(**self.get_new_init_args(matched_nodes))],
            vis_to_add=[*self.make_value_infos(**self.get_new_vi_args(matched_nodes))],
        )

        return top_node.input

    def pattern_condition_checker(self, nodes_to_check):
        """Return True only when all fusion conditions hold for the matched nodes."""
        top_node, base_node = nodes_to_check

        if not self.check_condition_1(top_node.output[0]):
            return False

        if not self.check_condition_2(top_node):
            return False

        if not self.check_condition_2(base_node):
            return False

        # BUGFIX: the original fell through here and returned None (falsy),
        # so the pattern could never fuse even when all conditions passed.
        return True

    def check_condition_1(self, tensor_name):
        """Check that the MatMul output is rank-2."""
        if len(self.get_value_info_shape(tensor_name)) == 2:
            return True
        return False

    def check_condition_2(self, node):
        """Check that exactly one of the node's inputs is an initializer."""
        num_init = 0
        for node_input in node.input:
            if node_input in self.initializer_map:
                num_init += 1
        if num_init == 1:
            return True
        return False

    def get_new_vi_args(self, matched_nodes):
        """Return the data input of the top node and the output of the base node."""
        top_node = matched_nodes[0]
        base_node = matched_nodes[-1]

        fnode_input = self.get_data_node_input(top_node)
        fnode_output = base_node.output[0]

        return {"node_input": fnode_input, "node_output": fnode_output}

    def get_new_init_args(self, matched_nodes):
        """Return the weight (MatMul) and bias (Add) initializer input names."""
        top_node = matched_nodes[0]
        base_node = matched_nodes[-1]

        fw_input = self.get_init_node_input(top_node)
        fb_input = self.get_init_node_input(base_node)

        return {"w_input": fw_input, "b_input": fb_input}

    def get_new_node_args(self, matched_nodes):
        """Merge value-info args and initializer args for make_nodes()."""
        args = dict()
        args.update(self.get_new_vi_args(matched_nodes))
        args.update(self.get_new_init_args(matched_nodes))
        return args

    def make_nodes(self, node_input, node_output, w_input, b_input, **kwargs):
        """Create the Unsqueeze -> Conv (1x1) -> Squeeze replacement chain."""
        # Lift the 2-D input to 4-D (N, C, 1, 1) so Conv can consume it.
        unsqueeze_node = self.make_node(
            "Unsqueeze",
            inputs=[node_input],
            outputs=[node_input + "_unsqueezed"],
            name=node_input + "_1",
            **{"axes": [2, 3]}
        )

        conv_node = self.make_node(
            "Conv",
            inputs=[unsqueeze_node.output[0], w_input + "_fused", b_input + "_fused"],
            outputs=[node_input + "_fused"],
            name=node_input + "_2",
            **{
                "dilations": [1, 1],
                "group": 1,
                "kernel_shape": [1, 1],
                "pads": [0, 0, 0, 0],
                "strides": [1, 1],
            }
        )

        # Drop the trailing 1x1 spatial dims to restore the original 2-D shape.
        squeeze_node = self.make_node(
            "Squeeze",
            inputs=[conv_node.output[0]],
            outputs=[node_output],
            name=node_input + "_3",
            **{"axes": [2, 3]}
        )

        return unsqueeze_node, conv_node, squeeze_node

    def make_initializers(self, w_input, b_input=None, **kwargs):
        """Create the "_fused" weight (and optional bias) initializers."""
        new_inits = []
        w_arr = self.get_initializer_array(w_input)
        new_w_arr = self.weight_transformation(w_arr, **kwargs)
        new_w_init = self.make_initializer_from_array(new_w_arr, w_input + "_fused")
        new_inits.append(new_w_init)

        if b_input:
            b_arr = self.get_initializer_array(b_input)
            new_b_init = self.make_initializer_from_array(b_arr, b_input + "_fused")
            new_inits.append(new_b_init)

        return new_inits

    def weight_transformation(self, w_arr, **kwargs):
        """Reshape the (c, n) MatMul weight into an (n, c, 1, 1) Conv weight."""
        c, n = w_arr.shape
        new_w_arr = w_arr.transpose().reshape(n, c, 1, 1)
        return new_w_arr

    def make_value_infos(self, node_input, node_output):
        """Create value infos for the intermediate Conv input/output tensors."""
        conv_input_vi = self.make_tensor_value_info(
            node_input + "_unsqueezed",
            onnx.TensorProto.FLOAT,
            self.get_value_info_shape(node_input) + [1, 1],
        )

        conv_output_vi = self.make_tensor_value_info(
            node_input + "_fused",
            onnx.TensorProto.FLOAT,
            self.get_value_info_shape(node_output) + [1, 1],
        )

        return conv_input_vi, conv_output_vi
class Pattern_2(Pattern_1, abc.ABC):
    """
    transform
        prev --> Gemm --> next
    to
        prev --> Unsqueeze --> Conv --> Squeeze --> next

    if 1. one of Gemm.A and Gemm.B must have initializer
       2. Gemm.C must have initializer if defined
    """

    pattern_to_match = ["Gemm"]

    def pattern_condition_checker(self, nodes_to_check):
        """Return True iff the matched Gemm node satisfies conditions 3 and 4."""
        node = nodes_to_check[0]

        if not self.check_condition_3(node):
            return False

        if not self.check_condition_4(node):
            return False

        return True

    def check_condition_3(self, node):
        """Check that exactly one of Gemm.A / Gemm.B (inputs 0 and 1) is an initializer."""
        num_init = 0
        # Only look at the first two inputs (A and B); input 2, if present, is the bias C.
        for idx, node_input in enumerate(node.input):
            if idx == 2:
                break
            if node_input in self.initializer_map:
                num_init += 1
        if num_init == 1:
            return True
        return False

    def check_condition_4(self, node):
        """Check that Gemm.C, when defined, is an initializer."""
        if len(node.input) == 3:
            if node.input[2] not in self.initializer_map:
                return False
        return True

    def get_new_init_args(self, matched_nodes):
        """Collect weight/bias input names plus the Gemm attributes needed for fusion."""
        node = matched_nodes[0]
        fw_input = node.input[1]
        fb_input = None
        if len(node.input) == 3:
            fb_input = node.input[2]

        args = {"w_input": fw_input, "b_input": fb_input}
        args.update(self.get_attrs(node))
        return args

    def get_new_vi_args(self, matched_nodes):
        """Return the data input / output tensor names of the matched Gemm node."""
        node = matched_nodes[0]
        fnode_input = node.input[0]
        fnode_output = node.output[0]

        return {"node_input": fnode_input, "node_output": fnode_output}

    def weight_transformation(self, w_arr, **kwargs):
        """Reshape the Gemm weight (B) into a 1x1 Conv weight of shape (n, c, 1, 1).

        When transB == 0, B is stored as (c, n) and must be transposed first so
        the output-channel dimension comes first, matching Conv's weight layout.
        """
        transB = kwargs["transB"]
        if transB == 0:
            w_arr = w_arr.transpose()
        n, c = w_arr.shape
        new_arr = w_arr.reshape(n, c, 1, 1)
        return new_arr

    def get_attrs(self, node):
        """Extract the Gemm attributes relevant to the fusion.

        ONNX allows Gemm to omit attributes that take their default values
        (alpha=1.0, beta=1.0, transB=0), so missing keys must fall back to
        those defaults instead of raising KeyError.
        """
        # attribute_to_kwargs is imported at module level; no local import needed.
        attrs = attribute_to_kwargs(node.attribute)
        alpha = attrs.get("alpha", 1.0)
        beta = attrs.get("beta", 1.0)
        assert alpha == beta == 1.0, "Assume alpha = beta = 1.0"
        transB = attrs.get("transB", 0)
        return {"transB": transB}
class Pattern_3(ONNXTransformer, abc.ABC):
    """
    transform
        prev --> Conv --> Add --> next
    to
        prev --> Conv --> next

    if len(Conv.input) == 2
    """

    pattern_to_match = ["Conv", "Add"]

    def pattern_matching(self, base_node):
        """Fuse a bias-less Conv followed by Add into a single Conv with bias."""
        node_inputs = base_node.input

        matched = self.pattern_matcher(base_node, self.pattern_to_match)
        if not matched:
            return node_inputs
        if not self.pattern_condition_checker(matched):
            return node_inputs

        conv_node, add_node = matched
        self.transform_to_fuse(
            matched,
            nodes_to_add=[self.make_nodes(conv_node, add_node)],
            inits_to_add=[self.make_initializers(add_node)],
        )
        return conv_node.input

    def pattern_condition_checker(self, nodes_to_check):
        """The pattern applies only when the Conv node carries no bias input."""
        conv_node = nodes_to_check[0]
        return self.check_condition_1(conv_node)

    def check_condition_1(self, node):
        """
        check that Conv has no bias input, i.e. only data and weight inputs
        """
        return len(node.input) == 2

    def make_nodes(self, top_node, base_node):
        """Build the fused Conv: the original Conv inputs plus the Add bias."""
        fused_bias_name = self.get_init_node_input(base_node) + "_fused"
        return self.make_node(
            "Conv",
            inputs=[*top_node.input, fused_bias_name],
            outputs=[base_node.output[0]],
            name=top_node.name,
            **attribute_to_kwargs(top_node.attribute)
        )

    def make_initializers(self, base_node):
        """Flatten the Add bias initializer into a 1-D Conv bias named "<bias>_fused"."""
        bias_name = self.get_init_node_input(base_node)
        bias_arr = self.get_initializer_array(bias_name)
        return self.make_initializer_from_array(bias_arr.flatten(), bias_name + "_fused")
app_auth/views.py | vadimkorr/scheduled-posts-manager | 0 | 6616643 | <reponame>vadimkorr/scheduled-posts-manager<filename>app_auth/views.py
from django.shortcuts import render, redirect
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import authenticate, login
def sign_in(request):
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
login(request, user, 'appAuth.auth_backend.AuthBackend')
print(user)
# form = {
# 'username' : username,
# 'password': <PASSWORD>
# }
# form = AuthenticationForm(data=request.POST)
# if form.is_valid():
render(request, 'posts.html')
# redirect('/posts/')
else:
form = AuthenticationForm()
return render(request, 'sign-in.html', {'form': form})
def sign_up(request):
return render(request, 'sign-up.html')
| from django.shortcuts import render, redirect
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import authenticate, login
def sign_in(request):
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
login(request, user, 'appAuth.auth_backend.AuthBackend')
print(user)
# form = {
# 'username' : username,
# 'password': <PASSWORD>
# }
# form = AuthenticationForm(data=request.POST)
# if form.is_valid():
render(request, 'posts.html')
# redirect('/posts/')
else:
form = AuthenticationForm()
return render(request, 'sign-in.html', {'form': form})
def sign_up(request):
return render(request, 'sign-up.html') | en | 0.316344 | # form = { # 'username' : username, # 'password': <PASSWORD> # } # form = AuthenticationForm(data=request.POST) # if form.is_valid(): # redirect('/posts/') | 2.449349 | 2 |
txpoloniex/const.py | congruency/txpoloniex | 1 | 6616644 | <filename>txpoloniex/const.py
"""
Constant values for the Poloniex API
"""
PUBLIC_API = 'https://poloniex.com/public'
PRIVATE_API = 'https://poloniex.com/tradingApi'
PUBLIC_COMMANDS = [
'returnTicker',
'return24hVolume',
'returnOrderBook',
'returnTradeHistory',
'returnChartData',
'returnCurrencies',
'returnLoanOrders',
]
PRIVATE_COMMANDS = [
'returnBalances',
'returnCompleteBalances',
'returnDepositAddresses',
'generateNewAddress',
'returnDepositsWithdrawals',
'returnOpenOrders',
'returnTradeHistory',
'returnAvailableAccountBalances',
'returnTradableBalances',
'returnOpenLoanOffers',
'returnOrderTrades',
'returnActiveLoans',
'returnLendingHistory',
'createLoanOffer',
'cancelLoanOffer',
'toggleAutoRenew',
'buy',
'sell',
'cancelOrder',
'moveOrder',
'withdraw',
'returnFeeInfo',
'transferBalance',
'returnMarginAccountSummary',
'marginBuy',
'marginSell',
'getMarginPosition',
'closeMarginPosition',
]
DATE_FORMAT='%Y-%m-%d %H:%M:%S'
| <filename>txpoloniex/const.py
"""
Constant values for the Poloniex API
"""
PUBLIC_API = 'https://poloniex.com/public'
PRIVATE_API = 'https://poloniex.com/tradingApi'
PUBLIC_COMMANDS = [
'returnTicker',
'return24hVolume',
'returnOrderBook',
'returnTradeHistory',
'returnChartData',
'returnCurrencies',
'returnLoanOrders',
]
PRIVATE_COMMANDS = [
'returnBalances',
'returnCompleteBalances',
'returnDepositAddresses',
'generateNewAddress',
'returnDepositsWithdrawals',
'returnOpenOrders',
'returnTradeHistory',
'returnAvailableAccountBalances',
'returnTradableBalances',
'returnOpenLoanOffers',
'returnOrderTrades',
'returnActiveLoans',
'returnLendingHistory',
'createLoanOffer',
'cancelLoanOffer',
'toggleAutoRenew',
'buy',
'sell',
'cancelOrder',
'moveOrder',
'withdraw',
'returnFeeInfo',
'transferBalance',
'returnMarginAccountSummary',
'marginBuy',
'marginSell',
'getMarginPosition',
'closeMarginPosition',
]
DATE_FORMAT='%Y-%m-%d %H:%M:%S'
| en | 0.16222 | Constant values for the Poloniex API | 1.303959 | 1 |
blues/tests/test_sidechain.py | MobleyLab/ncmc | 31 | 6616645 | <reponame>MobleyLab/ncmc
import unittest, parmed
from blues import utils
from blues.simulation import SystemFactory, SimulationFactory, BLUESSimulation
from simtk.openmm import app
from blues.moves import SideChainMove
from blues.moves import MoveEngine
from openmmtools import testsystems
import simtk.unit as unit
import numpy as np
from unittest import skipUnless
try:
import openeye.oechem as oechem
if not oechem.OEChemIsLicensed():
raise ImportError("Need License for OEChem! SideChainMove class will be unavailable.")
try:
import oeommtools.utils as oeommtools
except ImportError:
raise ImportError('Could not import oeommtools. SideChainMove class will be unavailable.')
has_openeye = True
except ImportError:
has_openeye = False
print('Could not import openeye-toolkits. SideChainMove class will be unavailable.')
@skipUnless(has_openeye, 'Cannot test SideChainMove without openeye-toolkits and oeommtools.')
class SideChainTester(unittest.TestCase):
"""
Test the SmartDartMove.move() function.
"""
def setUp(self):
# Obtain topologies/positions
prmtop = utils.get_data_filename('blues', 'tests/data/vacDivaline.prmtop')
inpcrd = utils.get_data_filename('blues', 'tests/data/vacDivaline.inpcrd')
self.struct = parmed.load_file(prmtop, xyz=inpcrd)
self.sidechain = SideChainMove(self.struct, [1])
self.engine = MoveEngine(self.sidechain)
self.engine.selectMove()
self.system_cfg = {'nonbondedMethod': app.NoCutoff, 'constraints': app.HBonds}
self.systems = SystemFactory(self.struct, self.sidechain.atom_indices, self.system_cfg)
self.cfg = {
'dt': 0.002 * unit.picoseconds,
'friction': 1 * 1 / unit.picoseconds,
'temperature': 300 * unit.kelvin,
'nIter': 1,
'nstepsMD': 1,
'nstepsNC': 4,
'alchemical_functions': {
'lambda_sterics':
'step(0.199999-lambda) + step(lambda-0.2)*step(0.8-lambda)*abs(lambda-0.5)*1/0.3 + step(lambda-0.800001)',
'lambda_electrostatics':
'step(0.2-lambda)- 1/0.2*lambda*step(0.2-lambda) + 1/0.2*(lambda-0.8)*step(lambda-0.8)'
}
}
self.simulations = SimulationFactory(self.systems, self.engine, self.cfg)
def test_getRotBondAtoms(self):
vals = [v for v in self.sidechain.rot_atoms[1].values()][0]
assert len(vals) == 11
#Ensure it selects 1 rotatable bond in Valine
assert len(self.sidechain.rot_bonds) == 1
def test_sidechain_move(self):
atom_indices = [v for v in self.sidechain.rot_atoms[1].values()][0]
before_move = self.simulations.ncmc.context.getState(getPositions=True).getPositions(
asNumpy=True)[atom_indices, :]
self.simulations.ncmc.context = self.engine.runEngine(self.simulations.ncmc.context)
after_move = self.simulations.ncmc.context.getState(getPositions=True).getPositions(
asNumpy=True)[atom_indices, :]
#Check that our system has run dynamics
# Integrator must step for context to update positions
# Remove the first two atoms in check as these are the anchor atoms and are not rotated.
pos_compare = np.not_equal(before_move, after_move)[2:, :].all()
assert pos_compare
if __name__ == "__main__":
unittest.main()
| import unittest, parmed
from blues import utils
from blues.simulation import SystemFactory, SimulationFactory, BLUESSimulation
from simtk.openmm import app
from blues.moves import SideChainMove
from blues.moves import MoveEngine
from openmmtools import testsystems
import simtk.unit as unit
import numpy as np
from unittest import skipUnless
try:
import openeye.oechem as oechem
if not oechem.OEChemIsLicensed():
raise ImportError("Need License for OEChem! SideChainMove class will be unavailable.")
try:
import oeommtools.utils as oeommtools
except ImportError:
raise ImportError('Could not import oeommtools. SideChainMove class will be unavailable.')
has_openeye = True
except ImportError:
has_openeye = False
print('Could not import openeye-toolkits. SideChainMove class will be unavailable.')
@skipUnless(has_openeye, 'Cannot test SideChainMove without openeye-toolkits and oeommtools.')
class SideChainTester(unittest.TestCase):
"""
Test the SmartDartMove.move() function.
"""
def setUp(self):
# Obtain topologies/positions
prmtop = utils.get_data_filename('blues', 'tests/data/vacDivaline.prmtop')
inpcrd = utils.get_data_filename('blues', 'tests/data/vacDivaline.inpcrd')
self.struct = parmed.load_file(prmtop, xyz=inpcrd)
self.sidechain = SideChainMove(self.struct, [1])
self.engine = MoveEngine(self.sidechain)
self.engine.selectMove()
self.system_cfg = {'nonbondedMethod': app.NoCutoff, 'constraints': app.HBonds}
self.systems = SystemFactory(self.struct, self.sidechain.atom_indices, self.system_cfg)
self.cfg = {
'dt': 0.002 * unit.picoseconds,
'friction': 1 * 1 / unit.picoseconds,
'temperature': 300 * unit.kelvin,
'nIter': 1,
'nstepsMD': 1,
'nstepsNC': 4,
'alchemical_functions': {
'lambda_sterics':
'step(0.199999-lambda) + step(lambda-0.2)*step(0.8-lambda)*abs(lambda-0.5)*1/0.3 + step(lambda-0.800001)',
'lambda_electrostatics':
'step(0.2-lambda)- 1/0.2*lambda*step(0.2-lambda) + 1/0.2*(lambda-0.8)*step(lambda-0.8)'
}
}
self.simulations = SimulationFactory(self.systems, self.engine, self.cfg)
def test_getRotBondAtoms(self):
vals = [v for v in self.sidechain.rot_atoms[1].values()][0]
assert len(vals) == 11
#Ensure it selects 1 rotatable bond in Valine
assert len(self.sidechain.rot_bonds) == 1
def test_sidechain_move(self):
atom_indices = [v for v in self.sidechain.rot_atoms[1].values()][0]
before_move = self.simulations.ncmc.context.getState(getPositions=True).getPositions(
asNumpy=True)[atom_indices, :]
self.simulations.ncmc.context = self.engine.runEngine(self.simulations.ncmc.context)
after_move = self.simulations.ncmc.context.getState(getPositions=True).getPositions(
asNumpy=True)[atom_indices, :]
#Check that our system has run dynamics
# Integrator must step for context to update positions
# Remove the first two atoms in check as these are the anchor atoms and are not rotated.
pos_compare = np.not_equal(before_move, after_move)[2:, :].all()
assert pos_compare
if __name__ == "__main__":
unittest.main() | en | 0.891765 | Test the SmartDartMove.move() function. # Obtain topologies/positions #Ensure it selects 1 rotatable bond in Valine #Check that our system has run dynamics # Integrator must step for context to update positions # Remove the first two atoms in check as these are the anchor atoms and are not rotated. | 2.05323 | 2 |
nanome/util/logs.py | nanome-ai/nanome-plugin-api | 1 | 6616646 | <filename>nanome/util/logs.py
import functools
import inspect
import logging
import sys
from .enum import IntEnum, auto
class Logs(object):
"""
| Allows for easy message logging without buffer issues.
| Possible log types are Debug, Warning, and Error.
"""
class LogType(IntEnum):
debug = auto()
warning = auto()
error = auto()
info = auto()
@classmethod
def error(cls, *args, **kwargs):
"""
| Prints an error
:param args: Variable length argument list
:type args: Anything printable
:param kwargs: Keyword arguments to pass to python logging module.
For options, see https://github.com/python/cpython/blob/main/Lib/logging/__init__.py#L1604
"""
module = cls.caller_name()
logger = logging.getLogger(module)
msg = ' '.join(map(str, args))
use_exc_info = sys.exc_info()[0] is not None
logger.error(msg, exc_info=use_exc_info, **kwargs)
@classmethod
def warning(cls, *args, **kwargs):
"""
| Prints a warning
:param args: Variable length argument list
:type args: Anything printable
:param kwargs: Keyword arguments to pass to python logging module.
For options, see https://github.com/python/cpython/blob/main/Lib/logging/__init__.py#L1604
"""
module = cls.caller_name()
logger = logging.getLogger(module)
msg = ' '.join(map(str, args))
logger.warning(msg, **kwargs)
@classmethod
def message(cls, *args, **kwargs):
"""
| Prints a message
:param args: Variable length argument list
:type args: Anything printable
:param kwargs: Keyword arguments to pass to python logging module.
For options, see https://github.com/python/cpython/blob/main/Lib/logging/__init__.py#L1604
"""
module = cls.caller_name()
logger = logging.getLogger(module)
msg = ' '.join(map(str, args))
logger.info(msg, **kwargs)
@classmethod
def debug(cls, *args, **kwargs):
"""
| Prints a debug message
| Prints only if plugin started in verbose mode (with -v argument)
:param args: Variable length argument list
:type args: Anything printable
:param kwargs: Keyword arguments to pass to python logging module.
For options, see https://github.com/python/cpython/blob/main/Lib/logging/__init__.py#L1604
"""
module = cls.caller_name()
logger = logging.getLogger(module)
msg = ' '.join(map(str, args))
logger.debug(msg, **kwargs)
@staticmethod
def deprecated(new_func=None, msg=""):
def deprecated_decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not wrapper.used:
warning = "Function " + func.__name__ + " is deprecated. "
if new_func is not None:
warning += "Try using " + new_func + " instead. "
warning += msg
Logs.warning(warning)
wrapper.used = True
return func(*args, **kwargs)
wrapper.used = False
return wrapper
return deprecated_decorator
@staticmethod
def caller_name(skip=2):
"""Get a name of a caller in the format module.class.method
`skip` specifies how many levels of stack to skip while getting caller
name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.
An empty string is returned if skipped levels exceed stack height
https://stackoverflow.com/questions/2654113/how-to-get-the-callers-method-name-in-the-called-method
"""
stack = inspect.stack()
start = 0 + skip
if len(stack) < start + 1:
return ''
parentframe = stack[start][0]
name = []
module = inspect.getmodule(parentframe)
# `modname` can be None when frame is executed directly in console
# TODO(techtonik): consider using __main__
if module:
name.append(module.__name__)
# detect classname
if 'self' in parentframe.f_locals:
# I don't know any way to detect call from the object method
# XXX: there seems to be no way to detect static method call - it will
# be just a function call
name.append(parentframe.f_locals['self'].__class__.__name__)
codename = parentframe.f_code.co_name
if codename != '<module>': # top level usually
name.append(codename) # function or a method
# Avoid circular refs and frame leaks
# https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack
del parentframe, stack
return ".".join(name)
| <filename>nanome/util/logs.py
import functools
import inspect
import logging
import sys
from .enum import IntEnum, auto
class Logs(object):
"""
| Allows for easy message logging without buffer issues.
| Possible log types are Debug, Warning, and Error.
"""
class LogType(IntEnum):
debug = auto()
warning = auto()
error = auto()
info = auto()
@classmethod
def error(cls, *args, **kwargs):
"""
| Prints an error
:param args: Variable length argument list
:type args: Anything printable
:param kwargs: Keyword arguments to pass to python logging module.
For options, see https://github.com/python/cpython/blob/main/Lib/logging/__init__.py#L1604
"""
module = cls.caller_name()
logger = logging.getLogger(module)
msg = ' '.join(map(str, args))
use_exc_info = sys.exc_info()[0] is not None
logger.error(msg, exc_info=use_exc_info, **kwargs)
@classmethod
def warning(cls, *args, **kwargs):
"""
| Prints a warning
:param args: Variable length argument list
:type args: Anything printable
:param kwargs: Keyword arguments to pass to python logging module.
For options, see https://github.com/python/cpython/blob/main/Lib/logging/__init__.py#L1604
"""
module = cls.caller_name()
logger = logging.getLogger(module)
msg = ' '.join(map(str, args))
logger.warning(msg, **kwargs)
@classmethod
def message(cls, *args, **kwargs):
"""
| Prints a message
:param args: Variable length argument list
:type args: Anything printable
:param kwargs: Keyword arguments to pass to python logging module.
For options, see https://github.com/python/cpython/blob/main/Lib/logging/__init__.py#L1604
"""
module = cls.caller_name()
logger = logging.getLogger(module)
msg = ' '.join(map(str, args))
logger.info(msg, **kwargs)
@classmethod
def debug(cls, *args, **kwargs):
"""
| Prints a debug message
| Prints only if plugin started in verbose mode (with -v argument)
:param args: Variable length argument list
:type args: Anything printable
:param kwargs: Keyword arguments to pass to python logging module.
For options, see https://github.com/python/cpython/blob/main/Lib/logging/__init__.py#L1604
"""
module = cls.caller_name()
logger = logging.getLogger(module)
msg = ' '.join(map(str, args))
logger.debug(msg, **kwargs)
@staticmethod
def deprecated(new_func=None, msg=""):
def deprecated_decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not wrapper.used:
warning = "Function " + func.__name__ + " is deprecated. "
if new_func is not None:
warning += "Try using " + new_func + " instead. "
warning += msg
Logs.warning(warning)
wrapper.used = True
return func(*args, **kwargs)
wrapper.used = False
return wrapper
return deprecated_decorator
@staticmethod
def caller_name(skip=2):
"""Get a name of a caller in the format module.class.method
`skip` specifies how many levels of stack to skip while getting caller
name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.
An empty string is returned if skipped levels exceed stack height
https://stackoverflow.com/questions/2654113/how-to-get-the-callers-method-name-in-the-called-method
"""
stack = inspect.stack()
start = 0 + skip
if len(stack) < start + 1:
return ''
parentframe = stack[start][0]
name = []
module = inspect.getmodule(parentframe)
# `modname` can be None when frame is executed directly in console
# TODO(techtonik): consider using __main__
if module:
name.append(module.__name__)
# detect classname
if 'self' in parentframe.f_locals:
# I don't know any way to detect call from the object method
# XXX: there seems to be no way to detect static method call - it will
# be just a function call
name.append(parentframe.f_locals['self'].__class__.__name__)
codename = parentframe.f_code.co_name
if codename != '<module>': # top level usually
name.append(codename) # function or a method
# Avoid circular refs and frame leaks
# https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack
del parentframe, stack
return ".".join(name)
| en | 0.617322 | | Allows for easy message logging without buffer issues.
| Possible log types are Debug, Warning, and Error. | Prints an error
:param args: Variable length argument list
:type args: Anything printable
:param kwargs: Keyword arguments to pass to python logging module.
For options, see https://github.com/python/cpython/blob/main/Lib/logging/__init__.py#L1604 | Prints a warning
:param args: Variable length argument list
:type args: Anything printable
:param kwargs: Keyword arguments to pass to python logging module.
For options, see https://github.com/python/cpython/blob/main/Lib/logging/__init__.py#L1604 | Prints a message
:param args: Variable length argument list
:type args: Anything printable
:param kwargs: Keyword arguments to pass to python logging module.
For options, see https://github.com/python/cpython/blob/main/Lib/logging/__init__.py#L1604 | Prints a debug message
| Prints only if plugin started in verbose mode (with -v argument)
:param args: Variable length argument list
:type args: Anything printable
:param kwargs: Keyword arguments to pass to python logging module.
For options, see https://github.com/python/cpython/blob/main/Lib/logging/__init__.py#L1604 Get a name of a caller in the format module.class.method
`skip` specifies how many levels of stack to skip while getting caller
name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.
An empty string is returned if skipped levels exceed stack height
https://stackoverflow.com/questions/2654113/how-to-get-the-callers-method-name-in-the-called-method # `modname` can be None when frame is executed directly in console # TODO(techtonik): consider using __main__ # detect classname # I don't know any way to detect call from the object method # XXX: there seems to be no way to detect static method call - it will # be just a function call # top level usually # function or a method # Avoid circular refs and frame leaks # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack | 2.794429 | 3 |
examples/transform-linear-scale.py | Frekby/glumpy | 1,074 | 6616647 | # -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import app
from glumpy.graphics.collections import PointCollection
from glumpy.transforms import LinearScale, Position, Viewport
window = app.Window(1024,1024, color=(1,1,1,1))
@window.event
def on_draw(dt):
window.clear()
points.draw()
@window.event
def on_mouse_scroll(x,y,dx,dy):
if dy < 0:
transform["domain"] = 1.1*transform["domain"]
else:
transform["domain"] = transform["domain"]/1.1
transform = Position(LinearScale())
points = PointCollection("agg", transform = transform)
points.append( P = np.random.normal(0,.5,(10000,3)) )
window.attach(points["transform"])
window.attach(points["viewport"])
app.run()
| # -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import app
from glumpy.graphics.collections import PointCollection
from glumpy.transforms import LinearScale, Position, Viewport
window = app.Window(1024,1024, color=(1,1,1,1))
@window.event
def on_draw(dt):
window.clear()
points.draw()
@window.event
def on_mouse_scroll(x,y,dx,dy):
if dy < 0:
transform["domain"] = 1.1*transform["domain"]
else:
transform["domain"] = transform["domain"]/1.1
transform = Position(LinearScale())
points = PointCollection("agg", transform = transform)
points.append( P = np.random.normal(0,.5,(10000,3)) )
window.attach(points["transform"])
window.attach(points["viewport"])
app.run()
| en | 0.356464 | # ----------------------------------------------------------------------------- # Copyright (c) 2009-2016 <NAME>. All rights reserved. # Distributed under the (new) BSD License. # ----------------------------------------------------------------------------- | 2.289029 | 2 |
tools/mo/openvino/tools/mo/ops/Complex.py | si-eun-kim/openvino | 1,127 | 6616648 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.ops.op import Op
class Complex(Op):
    """Model Optimizer op for TF 'Complex': packs a real and an imaginary
    tensor of identical shape into one tensor with a trailing axis of 2."""
    op = 'Complex'

    def __init__(self, graph: Graph, attrs: dict):
        """Register the op on `graph` with 2 inputs, 1 output and its infer hook."""
        super().__init__(graph, {
            'op': self.op,
            'in_ports_count': 2,
            'out_ports_count': 1,
            'infer': Complex.infer
        }, attrs)

    def supported_attrs(self):
        # No attributes are serialized to IR for this operation.
        return []

    @staticmethod
    def infer(node: Node):
        """Shape inference: output shape is the common input shape plus a
        trailing dimension of size 2 holding the (real, imag) pair."""
        real_shape, imag_shape = (node.in_port(i).data.get_shape() for i in (0, 1))
        if real_shape is None or imag_shape is None:
            return  # shapes not known yet; defer inference
        assert np.array_equal(real_shape, imag_shape), \
            "Shapes of real and imaginary parts must be the same. Got: {} as real part shape " \
            "and {} as imaginary part shape for Node {} with op {}." \
            "".format(real_shape, imag_shape, node.soft_get("name", node.id), node.op)
        # np.ma.append — presumably shapes may be masked arrays (dynamic dims); verify.
        node.out_port(0).data.set_shape(np.ma.append(real_shape, 2))
| # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.ops.op import Op
class Complex(Op):
    """Model Optimizer op for TF 'Complex': combines a real and an imaginary
    input tensor into a single tensor with a trailing dimension of size 2."""
    op = 'Complex'
    def __init__(self, graph: Graph, attrs: dict):
        """Register the op on `graph` with 2 inputs, 1 output and its infer hook."""
        mandatory_props = {
            'op': self.op,
            'in_ports_count': 2,
            'out_ports_count': 1,
            'infer': Complex.infer
        }
        super().__init__(graph, mandatory_props, attrs)
    def supported_attrs(self):
        # No attributes are serialized to IR for this operation.
        return []
    @staticmethod
    def infer(node: Node):
        """Shape inference: output shape = input shape + trailing axis of 2."""
        real_shape = node.in_port(0).data.get_shape()
        imag_shape = node.in_port(1).data.get_shape()
        if real_shape is None or imag_shape is None:
            return  # shapes unknown at this point; defer inference
        assert np.array_equal(real_shape, imag_shape), \
            "Shapes of real and imaginary parts must be the same. Got: {} as real part shape " \
            "and {} as imaginary part shape for Node {} with op {}." \
            "".format(real_shape, imag_shape, node.soft_get("name", node.id), node.op)
        # np.ma.append — presumably shapes may be masked arrays (dynamic dims); verify.
        output_shape = np.ma.append(real_shape, 2)
        node.out_port(0).data.set_shape(output_shape)
| de | 0.250642 | # Copyright (C) 2018-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 | 2.274046 | 2 |
# logicmonitor_sdk/models/map_item_info.py | JeremyTangCD/lm-sdk-python | 0 | 6616649
# coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MapItemInfo(object):
    """Swagger model for a LogicMonitor map item.

    Every attribute is a plain value held in a '_'-prefixed slot and exposed
    through a trivial get/set property, matching the generated-code contract.
    """

    # Attribute name -> declared swagger type.
    swagger_types = {
        'alert_status': 'str',
        'display_name': 'str',
        'formatted_location': 'str',
        'latitude': 'str',
        'description': 'str',
        'type': 'str',
        'sdt_status': 'str',
        'active_status': 'str',
        'name': 'str',
        'sub_type': 'str',
        'location': 'str',
        'id': 'int',
        'longitude': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'alert_status': 'alertStatus',
        'display_name': 'displayName',
        'formatted_location': 'formattedLocation',
        'latitude': 'latitude',
        'description': 'description',
        'type': 'type',
        'sdt_status': 'sdtStatus',
        'active_status': 'activeStatus',
        'name': 'name',
        'sub_type': 'subType',
        'location': 'location',
        'id': 'id',
        'longitude': 'longitude'
    }

    def __init__(self, alert_status=None, display_name=None, formatted_location=None, latitude=None, description=None, type=None, sdt_status=None, active_status=None, name=None, sub_type=None, location=None, id=None, longitude=None):  # noqa: E501
        """MapItemInfo - a model defined in Swagger"""  # noqa: E501
        # Start every backing slot at None, then apply only supplied values.
        for field in self.swagger_types:
            setattr(self, '_' + field, None)
        self.discriminator = None
        supplied = {
            'alert_status': alert_status,
            'display_name': display_name,
            'formatted_location': formatted_location,
            'latitude': latitude,
            'description': description,
            'type': type,
            'sdt_status': sdt_status,
            'active_status': active_status,
            'name': name,
            'sub_type': sub_type,
            'location': location,
            'id': id,
            'longitude': longitude,
        }
        for field, value in supplied.items():
            if value is not None:
                setattr(self, field, value)

    def _plain_property(field):
        """Class-body helper: build a get/set property backed by '_<field>'."""
        slot = '_' + field

        def _get(self):
            return getattr(self, slot)

        def _set(self, value):
            setattr(self, slot, value)

        return property(_get, _set, doc="Plain accessor for '%s'." % field)

    alert_status = _plain_property('alert_status')
    display_name = _plain_property('display_name')
    formatted_location = _plain_property('formatted_location')
    latitude = _plain_property('latitude')
    description = _plain_property('description')
    type = _plain_property('type')
    sdt_status = _plain_property('sdt_status')
    active_status = _plain_property('active_status')
    name = _plain_property('name')
    sub_type = _plain_property('sub_type')
    location = _plain_property('location')
    id = _plain_property('id')
    longitude = _plain_property('longitude')
    del _plain_property  # keep the helper out of the public class namespace

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            # Recursively serialize nested models, lists and dicts.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, 'to_dict') else v for v in value]
            if hasattr(value, 'to_dict'):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, 'to_dict') else v)
                        for k, v in value.items()}
            return value

        result = {attr: _convert(getattr(self, attr)) for attr in self.swagger_types}
        if issubclass(MapItemInfo, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, MapItemInfo) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MapItemInfo(object):
    """Swagger model for a LogicMonitor map item.

    Every attribute is a plain value held in a '_'-prefixed slot and exposed
    through a trivial get/set property, matching the generated-code contract.
    """

    # Attribute name -> declared swagger type.
    swagger_types = {
        'alert_status': 'str',
        'display_name': 'str',
        'formatted_location': 'str',
        'latitude': 'str',
        'description': 'str',
        'type': 'str',
        'sdt_status': 'str',
        'active_status': 'str',
        'name': 'str',
        'sub_type': 'str',
        'location': 'str',
        'id': 'int',
        'longitude': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'alert_status': 'alertStatus',
        'display_name': 'displayName',
        'formatted_location': 'formattedLocation',
        'latitude': 'latitude',
        'description': 'description',
        'type': 'type',
        'sdt_status': 'sdtStatus',
        'active_status': 'activeStatus',
        'name': 'name',
        'sub_type': 'subType',
        'location': 'location',
        'id': 'id',
        'longitude': 'longitude'
    }

    def __init__(self, alert_status=None, display_name=None, formatted_location=None, latitude=None, description=None, type=None, sdt_status=None, active_status=None, name=None, sub_type=None, location=None, id=None, longitude=None):  # noqa: E501
        """MapItemInfo - a model defined in Swagger"""  # noqa: E501
        # Start every backing slot at None, then apply only supplied values.
        for field in self.swagger_types:
            setattr(self, '_' + field, None)
        self.discriminator = None
        supplied = {
            'alert_status': alert_status,
            'display_name': display_name,
            'formatted_location': formatted_location,
            'latitude': latitude,
            'description': description,
            'type': type,
            'sdt_status': sdt_status,
            'active_status': active_status,
            'name': name,
            'sub_type': sub_type,
            'location': location,
            'id': id,
            'longitude': longitude,
        }
        for field, value in supplied.items():
            if value is not None:
                setattr(self, field, value)

    def _plain_property(field):
        """Class-body helper: build a get/set property backed by '_<field>'."""
        slot = '_' + field

        def _get(self):
            return getattr(self, slot)

        def _set(self, value):
            setattr(self, slot, value)

        return property(_get, _set, doc="Plain accessor for '%s'." % field)

    alert_status = _plain_property('alert_status')
    display_name = _plain_property('display_name')
    formatted_location = _plain_property('formatted_location')
    latitude = _plain_property('latitude')
    description = _plain_property('description')
    type = _plain_property('type')
    sdt_status = _plain_property('sdt_status')
    active_status = _plain_property('active_status')
    name = _plain_property('name')
    sub_type = _plain_property('sub_type')
    location = _plain_property('location')
    id = _plain_property('id')
    longitude = _plain_property('longitude')
    del _plain_property  # keep the helper out of the public class namespace

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            # Recursively serialize nested models, lists and dicts.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, 'to_dict') else v for v in value]
            if hasattr(value, 'to_dict'):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, 'to_dict') else v)
                        for k, v in value.items()}
            return value

        result = {attr: _convert(getattr(self, attr)) for attr in self.swagger_types}
        if issubclass(MapItemInfo, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, MapItemInfo) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| en | 0.439137 | # coding: utf-8 LogicMonitor REST API LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501 OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git # noqa: F401 NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. # noqa: E501 MapItemInfo - a model defined in Swagger # noqa: E501 Gets the alert_status of this MapItemInfo. # noqa: E501 :return: The alert_status of this MapItemInfo. # noqa: E501 :rtype: str Sets the alert_status of this MapItemInfo. :param alert_status: The alert_status of this MapItemInfo. # noqa: E501 :type: str Gets the display_name of this MapItemInfo. # noqa: E501 :return: The display_name of this MapItemInfo. # noqa: E501 :rtype: str Sets the display_name of this MapItemInfo. :param display_name: The display_name of this MapItemInfo. # noqa: E501 :type: str Gets the formatted_location of this MapItemInfo. # noqa: E501 :return: The formatted_location of this MapItemInfo. # noqa: E501 :rtype: str Sets the formatted_location of this MapItemInfo. :param formatted_location: The formatted_location of this MapItemInfo. # noqa: E501 :type: str Gets the latitude of this MapItemInfo. # noqa: E501 :return: The latitude of this MapItemInfo. # noqa: E501 :rtype: str Sets the latitude of this MapItemInfo. :param latitude: The latitude of this MapItemInfo. # noqa: E501 :type: str Gets the description of this MapItemInfo. # noqa: E501 :return: The description of this MapItemInfo. 
# noqa: E501 :rtype: str Sets the description of this MapItemInfo. :param description: The description of this MapItemInfo. # noqa: E501 :type: str Gets the type of this MapItemInfo. # noqa: E501 :return: The type of this MapItemInfo. # noqa: E501 :rtype: str Sets the type of this MapItemInfo. :param type: The type of this MapItemInfo. # noqa: E501 :type: str Gets the sdt_status of this MapItemInfo. # noqa: E501 :return: The sdt_status of this MapItemInfo. # noqa: E501 :rtype: str Sets the sdt_status of this MapItemInfo. :param sdt_status: The sdt_status of this MapItemInfo. # noqa: E501 :type: str Gets the active_status of this MapItemInfo. # noqa: E501 :return: The active_status of this MapItemInfo. # noqa: E501 :rtype: str Sets the active_status of this MapItemInfo. :param active_status: The active_status of this MapItemInfo. # noqa: E501 :type: str Gets the name of this MapItemInfo. # noqa: E501 :return: The name of this MapItemInfo. # noqa: E501 :rtype: str Sets the name of this MapItemInfo. :param name: The name of this MapItemInfo. # noqa: E501 :type: str Gets the sub_type of this MapItemInfo. # noqa: E501 :return: The sub_type of this MapItemInfo. # noqa: E501 :rtype: str Sets the sub_type of this MapItemInfo. :param sub_type: The sub_type of this MapItemInfo. # noqa: E501 :type: str Gets the location of this MapItemInfo. # noqa: E501 :return: The location of this MapItemInfo. # noqa: E501 :rtype: str Sets the location of this MapItemInfo. :param location: The location of this MapItemInfo. # noqa: E501 :type: str Gets the id of this MapItemInfo. # noqa: E501 :return: The id of this MapItemInfo. # noqa: E501 :rtype: int Sets the id of this MapItemInfo. :param id: The id of this MapItemInfo. # noqa: E501 :type: int Gets the longitude of this MapItemInfo. # noqa: E501 :return: The longitude of this MapItemInfo. # noqa: E501 :rtype: str Sets the longitude of this MapItemInfo. :param longitude: The longitude of this MapItemInfo. 
# noqa: E501 :type: str Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal | 1.765285 | 2 |
# MEMOCODE_2018_Benchmarks/HELLO_RNN/Python/RNN.py | PRETgroup/sann | 0 | 6616650
import numpy as np
import random as rand
import math
from timeit import default_timer as timer
class RNN:
def __init__(self, num_layers=3, bias=1, lr=0.01, weights=[], activation=[1, 0], layers=[]):
self.num_layers = num_layers
self.layers = layers
self.weights = weights
self.neurons = []
self.sums = []
self.bias = bias
self.activation = activation
self.hidden_prev = [0 for _ in range(self.layers[1])]
self.lr = lr
self.loss = 0
self.max_weights = 0
self.r_layer = 1
self.weights_unfolded = []
self.sums_unfolded = []
self.neurons_unfolded = []
self.delta_sums = []
if len(self.weights) < num_layers:
self.weights = [[] for _ in range(num_layers)]
for i in range(self.num_layers):
num_weights = 0
if i == 0: # input-to-hidden layer weights
num_weights = self.layers[0] * self.layers[1]
if self.bias:
num_weights = num_weights + self.layers[1]
for j in range(num_weights):
rand_range = 1.0 / np.sqrt(self.layers[0] + 1)
self.weights[0].append(rand.uniform(-rand_range, rand_range))
elif i == self.r_layer: # hidden-to-hidden layer weights
num_weights = self.layers[self.r_layer] * self.layers[self.r_layer]
for j in range(num_weights):
rand_range = 1.0/np.sqrt(self.layers[self.r_layer] + 1)
self.weights[-1].append(rand.uniform(-rand_range, rand_range))
else: # hidden-to-other and other-to-other layer weights
num_weights = self.layers[i - 1] * self.layers[i]
if self.bias and i > 0: # bias only for hidden and output
num_weights = num_weights + self.layers[i]
for j in range(num_weights):
rand_range = 1.0 / np.sqrt(self.layers[i - 1] + 1)
self.weights[i - 1].append(rand.uniform(-rand_range, rand_range))
if num_weights > self.max_weights:
self.max_weights = num_weights
def sigmoid(self, x):
if x > 100:
return 0
if x < -100:
return 1
return 1 / (1 + math.exp(-x))
def relu(self, x):
relu_const = 1
if x > 0:
return relu_const * x
else:
return 0
def linear(self, x):
linear_const = 1
return linear_const * x
def softmax(self, x=[]):
sum_exp = 0.0
for i in range(len(x)):
sum_exp = sum_exp + np.exp(x[i])
return [np.exp(x[i])/sum_exp for i in range(len(x))]
def activation_function(self, val, layer):
if self.activation[layer] == 0:
return self.sigmoid(val)
if self.activation[layer] == 1:
return np.tanh(val)
if self.activation[layer] == 2:
return self.relu(val)
if self.activation[layer] == 3:
return self.linear(val)
if self.activation[layer] > 3 or self.activation[layer] < 0:
return self.sigmoid(val)
def loss_function(self, target, predicted):
if self.loss == 0:
return target - predicted
if self.loss == 1:
return -target*np.log(predicted)
if self.loss < 0 or self.loss > 1:
return target - predicted
    # run a single input value through the RNN
    def run(self, inputs=[]):
        """Forward-propagate a single time step and return the output layer.

        Stores per-layer pre-activation sums in self.sums and activations in
        self.neurons, and updates self.hidden_prev with the new hidden state
        so the next call sees the recurrent context.
        NOTE(review): `inputs=[]` is a mutable default (never mutated here,
        but callers should still pass an explicit list).

        :param inputs: activation values for the input layer (one per neuron)
        :return: list of output-layer activations
        """
        self.neurons = []
        self.neurons.append(inputs)
        self.sums = []
        self.sums.append([])  # input layer has no weighted sums
        for i in range(1, self.num_layers): # go through all layers
            self.neurons.append([])
            self.sums.append([])
            for j in range(self.layers[i]): # current layer neurons
                weighted_sum = 0
                for k in range(self.layers[i - 1]): # previous layer neurons
                    weighted_sum = weighted_sum + \
                        self.neurons[i - 1][k] * self.weights[i - 1][j * self.layers[i - 1] + k]
                if i == self.r_layer: # hidden recurrent layer neurons
                    for k in range(self.layers[self.r_layer]): # adding previous hidden values
                        weighted_sum = weighted_sum + self.hidden_prev[k] * self.weights[-1][j * self.layers[self.r_layer] + k]
                if self.bias: # adding bias values
                    # bias weights are stored after the j*k matrix entries
                    weighted_sum = weighted_sum + self.weights[i - 1][self.layers[i] * self.layers[i - 1] + j]
                self.sums[i].append(weighted_sum)
                if not(self.activation[i - 1] == 4): # not softmax (softmax needs whole array)
                    self.neurons[i].append(self.activation_function(weighted_sum, i - 1))
            if self.activation[i - 1] == 4: # softmax activation (softmax needs whole array)
                soft_out = self.softmax(self.sums[i])
                for val in soft_out:
                    self.neurons[i].append(val)
        # remember this step's hidden activations for the next time step
        self.hidden_prev = [self.neurons[self.r_layer][j] for j in range(len(self.neurons[self.r_layer]))]
        return self.neurons[-1]
# run a time sequence through the RNN
def run_sequence(self, input_set=[]):
self.hidden_prev = [0 for _ in range(self.layers[1])]
output_set = []
for t in range(len(input_set)):
output_set.append(self.run(input_set[t]))
for t in range(len(output_set)):
for i in range(len(output_set[t])): # transform softmax output into unicode input
if output_set[t][i] == np.max(output_set[t]):
output_set[t][i] = 1
else:
output_set[t][i] = 0
return output_set
    # run a time sequence through the RNN given only the starting value
    def run_start(self, length=10, inputs=[]):
        """Generate a sequence of `length` one-hot outputs from a seed input.

        The first output is produced from `inputs`; each subsequent step
        feeds the previous (one-hot encoded) output back in as input.
        NOTE(review): `inputs=[]` is a mutable default (not mutated here).

        :param length: number of time steps to generate
        :param inputs: input vector for the first time step
        :return: list of one-hot output vectors, one per generated step
        """
        self.hidden_prev = [0 for _ in range(self.layers[1])]  # reset recurrent state
        output_set = []
        output_set.append(self.run(inputs))
        for i in range(len(output_set[0])): # transform softmax output into unicode input
            if output_set[0][i] == np.max(output_set[0]):
                output_set[0][i] = 1
            else:
                output_set[0][i] = 0
        for t in range(1, length): # start on second iteration
            output_set.append(self.run(output_set[t - 1])) # run RNN with previous outputs
            for i in range(len(output_set[t])): # transform softmax output into unicode input
                if output_set[t][i] == np.max(output_set[t]):
                    output_set[t][i] = 1
                else:
                    output_set[t][i] = 0
        return output_set
def unfold(self, steps):
self.weights_unfolded = []
for i in range(steps): # add unfolded layers until recurrent layer
self.weights_unfolded.append(self.weights[0])
self.weights_unfolded.append(self.weights[-1])
for j in range(self.r_layer, self.num_layers - 1): # add one set of layers following the recurrent layer
self.weights_unfolded.append(self.weights[j])
    def run_unfolded(self, steps, input_set=[]):
        """Forward pass through the network unrolled for `steps` time steps.

        Populates self.neurons_unfolded / self.sums_unfolded (one entry per
        unrolled hidden step, then one per post-recurrent layer) and returns
        the final output-layer activations. Uses self.hidden_prev as the
        initial hidden state.
        NOTE(review): `input_set=[]` is a mutable default (not mutated here).

        :param steps: number of time steps to unroll
        :param input_set: list of input vectors, one per unrolled step
        :return: list of output-layer activations for the final step
        """
        self.sums_unfolded = []
        self.sums_unfolded.append([])  # slot aligned with the initial hidden state
        self.neurons_unfolded = []
        self.neurons_unfolded.append(self.hidden_prev)
        for i in range(steps): # run through all input and recurrent layers
            self.neurons_unfolded.append([])
            self.sums_unfolded.append([])
            for j in range(self.layers[self.r_layer]): # run through all recurrent neurons
                weighted_sum = 0
                for k in range(self.layers[0]): # adding input layer neurons
                    weighted_sum = weighted_sum + input_set[i][k] * self.weights[0][j * self.layers[0] + k]
                for k in range(self.layers[self.r_layer]): # adding previous hidden values
                    weighted_sum = weighted_sum + self.neurons_unfolded[i][k] * \
                        self.weights[-1][j * self.layers[self.r_layer] + k]
                if self.bias: # adding bias values
                    weighted_sum = weighted_sum + self.weights[0][self.layers[1] * self.layers[0] + j]
                self.sums_unfolded[-1].append(weighted_sum)
                self.neurons_unfolded[-1].append(self.activation_function(weighted_sum, 0))
        # calculate output layer values for final hidden layer output
        for i in range(self.r_layer + 1, self.num_layers):
            self.neurons_unfolded.append([])
            self.sums_unfolded.append([])
            for j in range(self.layers[i]):
                weighted_sum = 0
                for k in range(self.layers[i - 1]): # previous layer neurons
                    weighted_sum = weighted_sum + \
                        self.neurons_unfolded[-2][k] * self.weights[i - 1][j * self.layers[i - 1] + k]
                if self.bias: # adding bias values
                    weighted_sum = weighted_sum + self.weights[i - 1][self.layers[i] * self.layers[i - 1] + j]
                self.sums_unfolded[-1].append(weighted_sum)
                if not (self.activation[i - 1] == 4): # not softmax (softmax needs whole array)
                    self.neurons_unfolded[-1].append(self.activation_function(weighted_sum, i - 1))
                if self.activation[i - 1] == 4: # softmax activation (softmax needs whole array)
                    soft_out = self.softmax(self.sums_unfolded[-1])
                    for val in soft_out:
                        self.neurons_unfolded[-1].append(val)
        return self.neurons_unfolded[-1]
def backpropagate(self, steps, predicted_outputs=[], step_outputs=[], delta_accumulate=[], step_inputs=[]):
    """Truncated backpropagation-through-time over `steps` unfolded steps.

    Reads the forward-pass caches (self.sums_unfolded / self.neurons_unfolded,
    filled by run_unfolded) and accumulates learning-rate-scaled weight
    updates into `delta_accumulate`, which mirrors self.weights' layout
    (recurrent hidden-to-hidden updates go to index -1).  self.weights is
    not modified here; the caller applies the accumulated deltas.
    """
    # first get error(s) at output
    self.delta_sums = []
    self.delta_sums.append([self.loss_function(step_outputs[i], predicted_outputs[i]) for i in range(self.layers[-1])])
    # iterate backwards starting at the outputs and ending just before the recurrent layer
    for layer in range(self.num_layers - self.r_layer - 1):
        self.delta_sums.insert(0, [0 for _ in range(self.layers[-1 - layer - 1])])
        # run through each neuron in current layer
        error_gradients = []
        if self.activation[-1 - layer] == 4:  # softmax activation if necessary
            sum = 0  # NOTE(review): shadows the builtin `sum` for the rest of this method
            exps = []  # store exp values of all sums
            for x in range(self.layers[-1 - layer]):
                exps.append(np.exp(self.sums_unfolded[-1 - layer][x]))
                sum = sum + exps[-1]
            sum = np.power(sum, 2)  # square the sum
            for x in range(self.layers[-1 - layer]):
                partial_sum = 0
                for y in range(self.layers[-1 - layer]):  # sum all other terms
                    if not(y == x):
                        partial_sum = partial_sum + exps[y]
                partial_sum = partial_sum * exps[x]
                error_gradients.append(partial_sum/sum)
        for i in range(self.layers[-1 - layer]):
            # run through each neuron in previous layer
            for j in range(self.layers[-1 - layer - 1]):
                # back-propagate error
                self.delta_sums[0][j] = self.delta_sums[0][j] + self.delta_sums[1][i] * \
                    self.weights_unfolded[-1 - layer][i * self.layers[-1 - layer - 1] + j]
                error_gradient = 0
                # calculate error gradient for current layer
                if self.activation[-1 - layer] == 0:  # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)]
                    error_gradient = self.neurons_unfolded[-1 - layer][i] * (1 - self.neurons_unfolded[-1 - layer][i])
                if self.activation[-1 - layer] == 1:  # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2
                    error_gradient = 1.0/np.power(np.cosh(self.sums_unfolded[-1 - layer][i]), 2)
                if self.activation[-1 - layer] == 2:  # differentiate relu(x) = 1 if sum > 0, otherwise 0
                    if self.sums_unfolded[-1 - layer][i] > 0:
                        error_gradient = 1
                if self.activation[-1 - layer] == 3:  # differentiate x = x
                    error_gradient = 1
                if self.activation[-1 - layer] == 4:  # differentiate softmax done earlier
                    error_gradient = error_gradients[i]
                weight_update = self.lr * self.delta_sums[-1 - layer][i] * \
                    error_gradient * self.neurons_unfolded[-1 - layer - 1][j]
                # print "Weight update0:", self.lr, "*", self.delta_sums[-1 - layer][i], "*", \
                #     error_gradient, "*", self.neurons_unfolded[-1 - layer - 1][j], "=", weight_update
                delta_accumulate[-1 - layer - 1][i * self.layers[-1 - layer - 1] + j] = \
                    delta_accumulate[-1 - layer - 1][i * self.layers[-1 - layer - 1] + j] + weight_update
            if self.bias:
                # bias weight update for neuron i of the current layer
                error_gradient = 0
                # calculate error gradient for current layer
                if self.activation[-1 - layer] == 0:  # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)]
                    error_gradient = self.neurons_unfolded[-1 - layer][i] * (1 - self.neurons_unfolded[-1 - layer][i])
                if self.activation[-1 - layer] == 1:  # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2
                    error_gradient = 1.0 / np.power(np.cosh(self.sums_unfolded[-1 - layer][i]), 2)
                if self.activation[-1 - layer] == 2:  # differentiate relu(x) = 1 if sum > 0, otherwise 0
                    if self.sums_unfolded[-1 - layer][i] > 0:
                        error_gradient = 1
                if self.activation[-1 - layer] == 3:  # differentiate x = x
                    error_gradient = 1
                if self.activation[-1 - layer] == 4:  # differentiate softmax done earlier
                    error_gradient = error_gradients[i]
                weight_update = self.lr * self.delta_sums[-1 - layer][i] * error_gradient
                # print "Weight update1:", self.lr, "*", self.delta_sums[-1 - layer - 1][i], "*", \
                #     error_gradient, "=", weight_update
                delta_accumulate[-1 - layer - 1][self.layers[-1 - layer] * self.layers[-1 - layer - 1] + i] = \
                    delta_accumulate[-1 - layer - 1][self.layers[-1 - layer] * self.layers[-1 - layer - 1] + i] + \
                    weight_update
    # iterate backwards through the unfolded weights array, starting at the last recurrent layer
    for layer in range(steps, 0, -1):
        if layer > 1:  # zero delta sums for previous layer
            self.delta_sums.insert(0, [0 for _ in range(self.layers[self.r_layer])])
        # run through each neuron in current layer (recurrent layer)
        for i in range(self.layers[self.r_layer]):
            # run through each neuron in previous layer (recurrent layer)
            for j in range(self.layers[self.r_layer]):
                if layer > 1:  # don't back-propagate into input layer
                    # back-propagate error to recurrent layers (only accessing recurrent layer weights)
                    self.delta_sums[0][j] = self.delta_sums[0][j] + self.delta_sums[1][i] * \
                        self.weights_unfolded[layer * 2 - 1][i * self.layers[self.r_layer] + j]
                error_gradient = 0
                # calculate error gradient for current layer
                if self.activation[0] == 0:  # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)]
                    error_gradient = self.neurons_unfolded[layer][i] * (1 - self.neurons_unfolded[layer][i])
                if self.activation[0] == 1:  # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2
                    error_gradient = 1.0 / np.power(np.cosh(self.sums_unfolded[layer][i]), 2)
                if self.activation[0] == 2:  # differentiate relu(x) = 1 if sum > 0, otherwise 0
                    if self.sums_unfolded[layer][i] > 0:
                        error_gradient = 1
                if self.activation[0] == 3:  # differentiate x = 1
                    error_gradient = 1
                # weight update for recurrent neuron
                # NOTE(review): this scales by delta_sums[0][i] -- the row being
                # filled for the *previous* step -- not delta_sums[1][i]; looks
                # suspicious, confirm this is the intended BPTT credit assignment.
                weight_update = self.lr * self.delta_sums[0][i] * \
                    error_gradient * self.neurons_unfolded[layer - 1][j]
                # print "Weight update2:", self.lr, "*", self.delta_sums[layer - 1][i], "*", \
                #     error_gradient, "*", self.neurons_unfolded[layer - 1][j], "=", weight_update
                # store at -1, since recurrent weights are stored at -1
                delta_accumulate[-1][i * self.layers[self.r_layer] + j] = \
                    delta_accumulate[-1][i * self.layers[self.r_layer] + j] + weight_update
            # run through each neuron in previous layer (input layer)
            for j in range(self.layers[0]):
                error_gradient = 0
                # calculate error gradient for current layer
                if self.activation[0] == 0:  # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)]
                    error_gradient = self.neurons_unfolded[layer][i] * (1 - self.neurons_unfolded[layer][i])
                if self.activation[0] == 1:  # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2
                    error_gradient = 1.0 / np.power(np.cosh(self.sums_unfolded[layer][i]), 2)
                if self.activation[0] == 2:  # differentiate relu(x) = 1 if sum > 0, otherwise 0
                    if self.sums_unfolded[layer][i] > 0:
                        error_gradient = 1
                if self.activation[0] == 3:  # differentiate x = 1
                    error_gradient = 1
                # weight update for input neuron
                weight_update = self.lr * self.delta_sums[0][i] * \
                    error_gradient * step_inputs[layer - 1][j]
                # print "Weight update3:", self.lr, "*", self.delta_sums[layer - 1][i], "*", \
                #     error_gradient, "*", step_inputs[layer - 1][j], "=", weight_update
                delta_accumulate[0][i * self.layers[0] + j] = \
                    delta_accumulate[0][i * self.layers[0] + j] + weight_update
            # bias neuron update
            if self.bias:
                error_gradient = 0
                # calculate error gradient for current layer
                if self.activation[0] == 0:  # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)]
                    error_gradient = self.neurons_unfolded[layer][i] * (1 - self.neurons_unfolded[layer][i])
                if self.activation[0] == 1:  # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2
                    error_gradient = 1.0 / np.power(np.cosh(self.sums_unfolded[layer][i]), 2)
                if self.activation[0] == 2:  # differentiate relu(x) = 1 if sum > 0, otherwise 0
                    if self.sums_unfolded[layer][i] > 0:
                        error_gradient = 1
                if self.activation[0] == 3:  # differentiate x = 1
                    error_gradient = 1
                # weight update for recurrent neuron
                weight_update = self.lr * self.delta_sums[0][i] * error_gradient
                # print "Weight update4:", self.lr, "*", self.delta_sums[layer - 1][i], "*", \
                #     error_gradient, "=", weight_update
                delta_accumulate[0][self.layers[self.r_layer] * self.layers[0] + i] = \
                    delta_accumulate[0][self.layers[self.r_layer] * self.layers[0] + i] + weight_update
def train_one(self, epochs=100, error=0.01, steps=1, data=[]):
    """Train on one sequence with truncated BPTT.

    `data` is a pair [inputs, outputs] of equal-length sequences of
    one-hot vectors; `steps` is the unroll depth (clamped to the sequence
    length).  Stops after `epochs` passes or once the per-window mean
    squared error drops below `error`.
    NOTE(review): the target is data[1][t] (start of the window) while the
    prediction follows the window's last input -- confirm the alignment.
    """
    num_epochs = 0
    error_tot = 1
    # reset the recurrent state before training
    self.hidden_prev = [0 for _ in range(self.layers[1])]
    if steps > len(data[0]):
        steps = len(data[0])
    while num_epochs < epochs and error_tot > error:  # run through data until trained or epoch finished
        error_tot = 0.0
        # unfold necessary layers
        self.unfold(steps)
        for t in range(len(data[0]) + 1 - steps):  # run through all the data in time steps
            # select data inputs
            step_inputs = data[0][t: t + steps]
            # run inputs through unfolded network
            predicted_outputs = self.run_unfolded(steps, step_inputs)
            step_outputs = data[1][t]  # only 1 data output for time t
            error_tot = error_tot + np.sum([np.power(self.loss_function(step_outputs[i], predicted_outputs[i]), 2)
                                            for i in range(len(step_outputs))])/float(len(step_outputs))
            # store cumulative weight updates
            delta_accumulate = [[0 for _ in range(len(self.weights[i]))] for i in range(self.num_layers)]
            # back-propagate error and store weight changes
            self.backpropagate(steps, predicted_outputs, step_outputs, delta_accumulate, step_inputs)
            # print "DA 0:", delta_accumulate[0]
            # print "DA 1:", delta_accumulate[1]
            # print "DA 2:", delta_accumulate[2]
            # update weights in folded network
            for i in range(len(delta_accumulate)):
                for j in range(len(delta_accumulate[i])):
                    # if i <= steps * self.r_layer:
                    #     delta_accumulate[i][j] = delta_accumulate[i][j] / float(steps)
                    self.weights[i][j] = self.weights[i][j] + delta_accumulate[i][j]
            # get recurrent hidden inputs for next iteration using a folded run
            self.run(data[0][t])
        error_tot = error_tot/(len(data[0]) + 1 - steps)
        print "At Epoch:", num_epochs, "-> Current error is:", error_tot
        num_epochs = num_epochs + 1
# --- demo / smoke-test script (Python 2 print statements) ---
# Trains a small RNN on a 4-symbol one-hot sequence and compares the
# network's sequence reproduction against the expected outputs.
start = timer()
# print rnn.run_sequence([[1, 0, 0], [0, 0, 1]])
# print rnn.run_start(length=5, inputs=[1, 0, 0])
# rnn.unfold(1)
# print rnn.run_unfolded(1, [[1, 0, 0]])
#
# rnn.unfold(2)
# print rnn.run_unfolded(2, [[1, 0, 0], [0, 1, 0]])
# rnn.unfold(1)
# rnn.run_unfolded(1, [[0, 0, 1, 0]])
# delta_accumulate = [[0 for _ in range(len(rnn.weights[i]))] for i in range(rnn.num_layers)]
# rnn.backpropagate(1, [1, 0, 0, 0], [0, 1, 0, 0], delta_accumulate, [[0, 0, 1, 0]])
# data = [[ # inputs
#     [0, 0, 1, 0, 0],  # 0
#     [0, 1, 0, 0, 0],  # 1
#     [1, 0, 0, 0, 0],  # 2
#     [0, 1, 0, 0, 0],  # 1
#     [0, 0, 1, 0, 0],  # 0
#     [0, 0, 0, 1, 0],  # -1
#     [0, 0, 0, 0, 1],  # -2
#     [0, 0, 0, 1, 0],  # -1
#     [0, 0, 1, 0, 0]   # 0
# ], [ # outputs
#     [0, 1, 0, 0, 0],  # 1
#     [1, 0, 0, 0, 0],  # 2
#     [0, 1, 0, 0, 0],  # 1
#     [0, 0, 1, 0, 0],  # 0
#     [0, 0, 0, 1, 0],  # -1
#     [0, 0, 0, 0, 1],  # -2
#     [0, 0, 0, 1, 0],  # -1
#     [0, 0, 1, 0, 0],  # 0
#     [0, 1, 0, 0, 0]   # 1
# ]]
# training data: outputs are the inputs shifted one step ahead
data = [[
    [0, 0, 1, 0],
    [0, 0, 0, 1],
    [0, 1, 0, 0],
    [0, 1, 0, 0],
], [
    [0, 0, 0, 1],
    [0, 1, 0, 0],
    [0, 1, 0, 0],
    [1, 0, 0, 0]
]]
# count = 9
# while count > 0:
rnn = RNN(layers=[4, 4, 4], lr=0.1, activation=[1, 0])
rnn.train_one(data=data, error=0.007, epochs=1000)
# rnn.train_one(steps=2, data=data, error=0.01, epochs=1000)
# rnn.train_one(steps=3, data=data, error=0.01)
# rnn.train_one(steps=4, data=data, error=0.01)
# rnn.weights[0] = [0.2632778982740025, 1.2873720206851751, 1.7626432978124789, -3.0595028774892534, 0.15098307779804732,
#                   1.1126095474854287, 0.38110171779164076, 1.9364004780687225, 0.22235335481093579, -1.8002217381550798,
#                   -0.70558753999902091, 2.963382220443747, -2.2589462616850731, 0.59612435854378309, -0.35356446544534881,
#                   0.5648778551910083, 0.17033704006778155, 0.5060839478807142, 0.25300547672626333, 0.32407602759647469,
#                   6.0782465343454941, -1.0811725796870233, 1.1174273862804875, -1.1368003446516448, -4.9914125647680363,
#                   0.73875113954700855, 1.0473265626731625, 0.38706194851792741, 1.4512352933257084, -0.024051087935752818]
# rnn.weights[1] = [1.2849544717182271, -0.82627784842497254, 1.9806972719590872, -1.3404860441159823, 0.063739576435029102,
#                   0.12683126700871813, 0.65883610605310594, -1.5179039027101742, -1.154818724088198, 3.3767498133387361,
#                   -1.5787200645487796, 0.0043726966620622008, 2.7960435372210006, -0.64773434184637213, 0.65394967554423489,
#                   2.3756590192967009, -2.0716277249803334, -1.5805960003075494, -1.3864512411867511, -2.7885305512808598,
#                   -2.9205075656527311, -0.52016340775199643, -2.5017437202256838, -1.1897163743027142, -0.17265447621757785,
#                   -1.1325017470971213, -1.0679391962839133, -0.4218239593885344, -0.97411887795767649, -1.4519510365432908]
# rnn.weights[2] = [-1.6989305308659794, 0.92151681407801267, 0.5222166625078084, 1.2249475740581155, 0.91207565544622293,
#                   0.94521030510371584, 0.27289993394574985, -0.11237519483934363, 1.1090120092036855, -0.32718115038747075,
#                   0.69846462864219272, -4.1601471690468106, -1.6198760439773012, 0.22878426496839163, 1.202923138143533,
#                   0.26516957184275108, 0.66363284709514114, -0.64157228008573675, 1.0090422843555509, -0.70820431085934366,
#                   -3.1982733019630514, 0.92664594706516556, -1.1515511478851503, 0.089271475518160184, 2.0459073214622094]
# for val in rnn.weights:
#     print val
# print [len(rnn.weights[i]) for i in range(len(rnn.weights))]
# compare teacher-forced reproduction against the expected outputs
print ""
print "Expected:", data[1]
print "Actual:", rnn.run_sequence(data[0])
# compare free-running generation (seeded with the first input) too
print ""
print "Expected:", data[1]
out_temp = rnn.run_start(length=4, inputs=data[0][0])
print "Actual:", out_temp
# count = 0
# for i in range(len(out_temp)):
#     if not(out_temp[i] == data[1][i]):
#         count = count + 1
#
# print count
end = timer()
print "Time taken:", (end - start)
import numpy as np
import random as rand
import math
from timeit import default_timer as timer
class RNN:
    """Simple recurrent neural network with one recurrent hidden layer.

    Weight layout (self.weights, each entry a flat row-major list):
      weights[0]   -- input->hidden weights, then layers[1] bias weights
      weights[i-1] -- layer (i-1) -> layer i weights (+ bias) for i >= 2
      weights[-1]  -- hidden->hidden recurrent weights (no bias)
    """

    def __init__(self, num_layers=3, bias=1, lr=0.01, weights=[], activation=[1, 0], layers=[]):
        # NOTE(review): mutable default arguments (weights/activation/layers) are
        # shared across calls; `weights` is rebound below when too short, but
        # `activation` and `layers` are stored as-is -- None defaults would be safer.
        self.num_layers = num_layers
        self.layers = layers          # neuron count per layer
        self.weights = weights
        self.neurons = []             # activations from the last folded run()
        self.sums = []                # pre-activation sums from the last folded run()
        self.bias = bias
        self.activation = activation  # activation code per weight layer (see activation_function)
        # previous hidden-layer activations fed back on the next time step
        self.hidden_prev = [0 for _ in range(self.layers[1])]
        self.lr = lr
        self.loss = 0                 # 0 -> difference loss, 1 -> cross-entropy (see loss_function)
        self.max_weights = 0
        self.r_layer = 1              # index of the recurrent hidden layer
        self.weights_unfolded = []
        self.sums_unfolded = []
        self.neurons_unfolded = []
        self.delta_sums = []
        if len(self.weights) < num_layers:
            # no complete weight set supplied: randomly initialise every layer
            # with uniform values scaled by 1/sqrt(fan_in + 1)
            self.weights = [[] for _ in range(num_layers)]
            for i in range(self.num_layers):
                num_weights = 0
                if i == 0:  # input-to-hidden layer weights
                    num_weights = self.layers[0] * self.layers[1]
                    if self.bias:
                        num_weights = num_weights + self.layers[1]
                    for j in range(num_weights):
                        rand_range = 1.0 / np.sqrt(self.layers[0] + 1)
                        self.weights[0].append(rand.uniform(-rand_range, rand_range))
                elif i == self.r_layer:  # hidden-to-hidden layer weights
                    num_weights = self.layers[self.r_layer] * self.layers[self.r_layer]
                    for j in range(num_weights):
                        rand_range = 1.0/np.sqrt(self.layers[self.r_layer] + 1)
                        self.weights[-1].append(rand.uniform(-rand_range, rand_range))
                else:  # hidden-to-other and other-to-other layer weights
                    num_weights = self.layers[i - 1] * self.layers[i]
                    if self.bias and i > 0:  # bias only for hidden and output
                        num_weights = num_weights + self.layers[i]
                    for j in range(num_weights):
                        rand_range = 1.0 / np.sqrt(self.layers[i - 1] + 1)
                        self.weights[i - 1].append(rand.uniform(-rand_range, rand_range))
                if num_weights > self.max_weights:
                    self.max_weights = num_weights
def sigmoid(self, x):
    """Logistic sigmoid 1/(1 + e^-x), clamped for numerical safety.

    BUGFIX: the saturation clamps were inverted -- the original returned 0
    for x > 100 and 1 for x < -100, the opposite of the sigmoid's
    asymptotes (sigmoid -> 1 as x -> +inf, -> 0 as x -> -inf), which also
    made its backprop derivative f(x)[1 - f(x)] wrong at saturation.
    """
    if x > 100:
        # saturated high: exp(-x) underflows toward 0, sigmoid -> 1
        return 1
    if x < -100:
        # saturated low: exp(-x) would overflow, sigmoid -> 0
        return 0
    return 1 / (1 + math.exp(-x))
def relu(self, x):
    """Rectified linear unit: scale * x for positive x, else 0."""
    scale = 1
    return scale * x if x > 0 else 0
def linear(self, x):
    """Identity activation (unit slope)."""
    slope = 1
    return slope * x
def softmax(self, x=[]):
    """Return the softmax of `x` as a list (exp-normalised to sum to 1)."""
    exps = [np.exp(v) for v in x]
    total = 0.0
    for e in exps:
        total = total + e
    return [e / total for e in exps]
def activation_function(self, val, layer):
if self.activation[layer] == 0:
return self.sigmoid(val)
if self.activation[layer] == 1:
return np.tanh(val)
if self.activation[layer] == 2:
return self.relu(val)
if self.activation[layer] == 3:
return self.linear(val)
if self.activation[layer] > 3 or self.activation[layer] < 0:
return self.sigmoid(val)
def loss_function(self, target, predicted):
if self.loss == 0:
return target - predicted
if self.loss == 1:
return -target*np.log(predicted)
if self.loss < 0 or self.loss > 1:
return target - predicted
# run a single input value through the RNN
def run(self, inputs=[]):
    """One folded forward step.

    Propagates `inputs` through every layer, mixing self.hidden_prev into
    the recurrent layer, then stores the new hidden activations back into
    self.hidden_prev for the next call.  Returns the output-layer
    activations; per-layer sums/activations are cached in self.sums /
    self.neurons.
    """
    self.neurons = []
    self.neurons.append(inputs)
    self.sums = []
    self.sums.append([])
    for i in range(1, self.num_layers):  # go through all layers
        self.neurons.append([])
        self.sums.append([])
        for j in range(self.layers[i]):  # current layer neurons
            weighted_sum = 0
            for k in range(self.layers[i - 1]):  # previous layer neurons
                weighted_sum = weighted_sum + \
                    self.neurons[i - 1][k] * self.weights[i - 1][j * self.layers[i - 1] + k]
            if i == self.r_layer:  # hidden recurrent layer neurons
                for k in range(self.layers[self.r_layer]):  # adding previous hidden values
                    weighted_sum = weighted_sum + self.hidden_prev[k] * self.weights[-1][j * self.layers[self.r_layer] + k]
            if self.bias:  # adding bias values
                weighted_sum = weighted_sum + self.weights[i - 1][self.layers[i] * self.layers[i - 1] + j]
            self.sums[i].append(weighted_sum)
            if not(self.activation[i - 1] == 4):  # not softmax (softmax needs whole array)
                self.neurons[i].append(self.activation_function(weighted_sum, i - 1))
            if self.activation[i - 1] == 4:  # softmax activation (softmax needs whole array)
                soft_out = self.softmax(self.sums[i])
                for val in soft_out:
                    self.neurons[i].append(val)
    # remember this step's hidden activations for the next time step
    self.hidden_prev = [self.neurons[self.r_layer][j] for j in range(len(self.neurons[self.r_layer]))]
    return self.neurons[-1]
# run a time sequence through the RNN
def run_sequence(self, input_set=[]):
    """Run a whole sequence through the folded net and binarise each output.

    Resets the recurrent state first.  Each output vector is then converted
    in place: entries equal to the current maximum become 1, the rest 0.
    NOTE(review): the binarising scan mutates the list it is taking
    np.max over, so once a maximum is rewritten to 1 any later tie with
    the old maximum becomes 0 -- confirm this is intended.
    """
    self.hidden_prev = [0 for _ in range(self.layers[1])]
    output_set = []
    for t in range(len(input_set)):
        output_set.append(self.run(input_set[t]))
    for t in range(len(output_set)):
        for i in range(len(output_set[t])):  # transform softmax output into unicode input
            if output_set[t][i] == np.max(output_set[t]):
                output_set[t][i] = 1
            else:
                output_set[t][i] = 0
    return output_set
# run a time sequence through the RNN given only the starting value
def run_start(self, length=10, inputs=[]):
    """Free-run the network for `length` steps from a single seed input.

    The first output is produced from `inputs`; every subsequent step is
    fed the previous step's binarised output.  Outputs are binarised the
    same way as in run_sequence (max entries -> 1, others -> 0), and the
    same mutation-during-scan tie behaviour applies.
    """
    self.hidden_prev = [0 for _ in range(self.layers[1])]
    output_set = []
    output_set.append(self.run(inputs))
    for i in range(len(output_set[0])):  # transform softmax output into unicode input
        if output_set[0][i] == np.max(output_set[0]):
            output_set[0][i] = 1
        else:
            output_set[0][i] = 0
    for t in range(1, length):  # start on second iteration
        output_set.append(self.run(output_set[t - 1]))  # run RNN with previous outputs
        for i in range(len(output_set[t])):  # transform softmax output into unicode input
            if output_set[t][i] == np.max(output_set[t]):
                output_set[t][i] = 1
            else:
                output_set[t][i] = 0
    return output_set
def unfold(self, steps):
self.weights_unfolded = []
for i in range(steps): # add unfolded layers until recurrent layer
self.weights_unfolded.append(self.weights[0])
self.weights_unfolded.append(self.weights[-1])
for j in range(self.r_layer, self.num_layers - 1): # add one set of layers following the recurrent layer
self.weights_unfolded.append(self.weights[j])
def run_unfolded(self, steps, input_set=[]):
    """Forward pass through the network unfolded for `steps` time steps.

    Feeds input_set[0..steps-1] through the recurrent layer, seeding the
    first step with self.hidden_prev, then pushes the final hidden state
    through the remaining feed-forward layers.  Intermediate pre-activation
    sums and activations are cached in self.sums_unfolded /
    self.neurons_unfolded for backpropagate().  Returns the output-layer
    activations.
    NOTE(review): mutable default `input_set=[]` is only read here, but a
    None default would be the safer idiom.
    """
    self.sums_unfolded = []
    self.sums_unfolded.append([])
    self.neurons_unfolded = []
    # slot 0 holds the hidden state carried over from the previous window
    self.neurons_unfolded.append(self.hidden_prev)
    for i in range(steps):  # run through all input and recurrent layers
        self.neurons_unfolded.append([])
        self.sums_unfolded.append([])
        for j in range(self.layers[self.r_layer]):  # run through all recurrent neurons
            weighted_sum = 0
            for k in range(self.layers[0]):  # adding input layer neurons
                weighted_sum = weighted_sum + input_set[i][k] * self.weights[0][j * self.layers[0] + k]
            for k in range(self.layers[self.r_layer]):  # adding previous hidden values
                weighted_sum = weighted_sum + self.neurons_unfolded[i][k] * \
                    self.weights[-1][j * self.layers[self.r_layer] + k]
            if self.bias:  # adding bias values
                # bias weights sit after the layers[1]*layers[0] connection weights
                weighted_sum = weighted_sum + self.weights[0][self.layers[1] * self.layers[0] + j]
            self.sums_unfolded[-1].append(weighted_sum)
            self.neurons_unfolded[-1].append(self.activation_function(weighted_sum, 0))
    # calculate output layer values for final hidden layer output
    for i in range(self.r_layer + 1, self.num_layers):
        self.neurons_unfolded.append([])
        self.sums_unfolded.append([])
        for j in range(self.layers[i]):
            weighted_sum = 0
            for k in range(self.layers[i - 1]):  # previous layer neurons
                weighted_sum = weighted_sum + \
                    self.neurons_unfolded[-2][k] * self.weights[i - 1][j * self.layers[i - 1] + k]
            if self.bias:  # adding bias values
                weighted_sum = weighted_sum + self.weights[i - 1][self.layers[i] * self.layers[i - 1] + j]
            self.sums_unfolded[-1].append(weighted_sum)
            if not (self.activation[i - 1] == 4):  # not softmax (softmax needs whole array)
                self.neurons_unfolded[-1].append(self.activation_function(weighted_sum, i - 1))
            if self.activation[i - 1] == 4:  # softmax activation (softmax needs whole array)
                soft_out = self.softmax(self.sums_unfolded[-1])
                for val in soft_out:
                    self.neurons_unfolded[-1].append(val)
    return self.neurons_unfolded[-1]
def backpropagate(self, steps, predicted_outputs=[], step_outputs=[], delta_accumulate=[], step_inputs=[]):
    """Truncated backpropagation-through-time over `steps` unfolded steps.

    Reads the forward-pass caches (self.sums_unfolded / self.neurons_unfolded,
    filled by run_unfolded) and accumulates learning-rate-scaled weight
    updates into `delta_accumulate`, which mirrors self.weights' layout
    (recurrent hidden-to-hidden updates go to index -1).  self.weights is
    not modified here; the caller applies the accumulated deltas.
    """
    # first get error(s) at output
    self.delta_sums = []
    self.delta_sums.append([self.loss_function(step_outputs[i], predicted_outputs[i]) for i in range(self.layers[-1])])
    # iterate backwards starting at the outputs and ending just before the recurrent layer
    for layer in range(self.num_layers - self.r_layer - 1):
        self.delta_sums.insert(0, [0 for _ in range(self.layers[-1 - layer - 1])])
        # run through each neuron in current layer
        error_gradients = []
        if self.activation[-1 - layer] == 4:  # softmax activation if necessary
            sum = 0  # NOTE(review): shadows the builtin `sum` for the rest of this method
            exps = []  # store exp values of all sums
            for x in range(self.layers[-1 - layer]):
                exps.append(np.exp(self.sums_unfolded[-1 - layer][x]))
                sum = sum + exps[-1]
            sum = np.power(sum, 2)  # square the sum
            for x in range(self.layers[-1 - layer]):
                partial_sum = 0
                for y in range(self.layers[-1 - layer]):  # sum all other terms
                    if not(y == x):
                        partial_sum = partial_sum + exps[y]
                partial_sum = partial_sum * exps[x]
                error_gradients.append(partial_sum/sum)
        for i in range(self.layers[-1 - layer]):
            # run through each neuron in previous layer
            for j in range(self.layers[-1 - layer - 1]):
                # back-propagate error
                self.delta_sums[0][j] = self.delta_sums[0][j] + self.delta_sums[1][i] * \
                    self.weights_unfolded[-1 - layer][i * self.layers[-1 - layer - 1] + j]
                error_gradient = 0
                # calculate error gradient for current layer
                if self.activation[-1 - layer] == 0:  # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)]
                    error_gradient = self.neurons_unfolded[-1 - layer][i] * (1 - self.neurons_unfolded[-1 - layer][i])
                if self.activation[-1 - layer] == 1:  # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2
                    error_gradient = 1.0/np.power(np.cosh(self.sums_unfolded[-1 - layer][i]), 2)
                if self.activation[-1 - layer] == 2:  # differentiate relu(x) = 1 if sum > 0, otherwise 0
                    if self.sums_unfolded[-1 - layer][i] > 0:
                        error_gradient = 1
                if self.activation[-1 - layer] == 3:  # differentiate x = x
                    error_gradient = 1
                if self.activation[-1 - layer] == 4:  # differentiate softmax done earlier
                    error_gradient = error_gradients[i]
                weight_update = self.lr * self.delta_sums[-1 - layer][i] * \
                    error_gradient * self.neurons_unfolded[-1 - layer - 1][j]
                # print "Weight update0:", self.lr, "*", self.delta_sums[-1 - layer][i], "*", \
                #     error_gradient, "*", self.neurons_unfolded[-1 - layer - 1][j], "=", weight_update
                delta_accumulate[-1 - layer - 1][i * self.layers[-1 - layer - 1] + j] = \
                    delta_accumulate[-1 - layer - 1][i * self.layers[-1 - layer - 1] + j] + weight_update
            if self.bias:
                # bias weight update for neuron i of the current layer
                error_gradient = 0
                # calculate error gradient for current layer
                if self.activation[-1 - layer] == 0:  # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)]
                    error_gradient = self.neurons_unfolded[-1 - layer][i] * (1 - self.neurons_unfolded[-1 - layer][i])
                if self.activation[-1 - layer] == 1:  # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2
                    error_gradient = 1.0 / np.power(np.cosh(self.sums_unfolded[-1 - layer][i]), 2)
                if self.activation[-1 - layer] == 2:  # differentiate relu(x) = 1 if sum > 0, otherwise 0
                    if self.sums_unfolded[-1 - layer][i] > 0:
                        error_gradient = 1
                if self.activation[-1 - layer] == 3:  # differentiate x = x
                    error_gradient = 1
                if self.activation[-1 - layer] == 4:  # differentiate softmax done earlier
                    error_gradient = error_gradients[i]
                weight_update = self.lr * self.delta_sums[-1 - layer][i] * error_gradient
                # print "Weight update1:", self.lr, "*", self.delta_sums[-1 - layer - 1][i], "*", \
                #     error_gradient, "=", weight_update
                delta_accumulate[-1 - layer - 1][self.layers[-1 - layer] * self.layers[-1 - layer - 1] + i] = \
                    delta_accumulate[-1 - layer - 1][self.layers[-1 - layer] * self.layers[-1 - layer - 1] + i] + \
                    weight_update
    # iterate backwards through the unfolded weights array, starting at the last recurrent layer
    for layer in range(steps, 0, -1):
        if layer > 1:  # zero delta sums for previous layer
            self.delta_sums.insert(0, [0 for _ in range(self.layers[self.r_layer])])
        # run through each neuron in current layer (recurrent layer)
        for i in range(self.layers[self.r_layer]):
            # run through each neuron in previous layer (recurrent layer)
            for j in range(self.layers[self.r_layer]):
                if layer > 1:  # don't back-propagate into input layer
                    # back-propagate error to recurrent layers (only accessing recurrent layer weights)
                    self.delta_sums[0][j] = self.delta_sums[0][j] + self.delta_sums[1][i] * \
                        self.weights_unfolded[layer * 2 - 1][i * self.layers[self.r_layer] + j]
                error_gradient = 0
                # calculate error gradient for current layer
                if self.activation[0] == 0:  # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)]
                    error_gradient = self.neurons_unfolded[layer][i] * (1 - self.neurons_unfolded[layer][i])
                if self.activation[0] == 1:  # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2
                    error_gradient = 1.0 / np.power(np.cosh(self.sums_unfolded[layer][i]), 2)
                if self.activation[0] == 2:  # differentiate relu(x) = 1 if sum > 0, otherwise 0
                    if self.sums_unfolded[layer][i] > 0:
                        error_gradient = 1
                if self.activation[0] == 3:  # differentiate x = 1
                    error_gradient = 1
                # weight update for recurrent neuron
                # NOTE(review): this scales by delta_sums[0][i] -- the row being
                # filled for the *previous* step -- not delta_sums[1][i]; looks
                # suspicious, confirm this is the intended BPTT credit assignment.
                weight_update = self.lr * self.delta_sums[0][i] * \
                    error_gradient * self.neurons_unfolded[layer - 1][j]
                # print "Weight update2:", self.lr, "*", self.delta_sums[layer - 1][i], "*", \
                #     error_gradient, "*", self.neurons_unfolded[layer - 1][j], "=", weight_update
                # store at -1, since recurrent weights are stored at -1
                delta_accumulate[-1][i * self.layers[self.r_layer] + j] = \
                    delta_accumulate[-1][i * self.layers[self.r_layer] + j] + weight_update
            # run through each neuron in previous layer (input layer)
            for j in range(self.layers[0]):
                error_gradient = 0
                # calculate error gradient for current layer
                if self.activation[0] == 0:  # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)]
                    error_gradient = self.neurons_unfolded[layer][i] * (1 - self.neurons_unfolded[layer][i])
                if self.activation[0] == 1:  # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2
                    error_gradient = 1.0 / np.power(np.cosh(self.sums_unfolded[layer][i]), 2)
                if self.activation[0] == 2:  # differentiate relu(x) = 1 if sum > 0, otherwise 0
                    if self.sums_unfolded[layer][i] > 0:
                        error_gradient = 1
                if self.activation[0] == 3:  # differentiate x = 1
                    error_gradient = 1
                # weight update for input neuron
                weight_update = self.lr * self.delta_sums[0][i] * \
                    error_gradient * step_inputs[layer - 1][j]
                # print "Weight update3:", self.lr, "*", self.delta_sums[layer - 1][i], "*", \
                #     error_gradient, "*", step_inputs[layer - 1][j], "=", weight_update
                delta_accumulate[0][i * self.layers[0] + j] = \
                    delta_accumulate[0][i * self.layers[0] + j] + weight_update
            # bias neuron update
            if self.bias:
                error_gradient = 0
                # calculate error gradient for current layer
                if self.activation[0] == 0:  # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)]
                    error_gradient = self.neurons_unfolded[layer][i] * (1 - self.neurons_unfolded[layer][i])
                if self.activation[0] == 1:  # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2
                    error_gradient = 1.0 / np.power(np.cosh(self.sums_unfolded[layer][i]), 2)
                if self.activation[0] == 2:  # differentiate relu(x) = 1 if sum > 0, otherwise 0
                    if self.sums_unfolded[layer][i] > 0:
                        error_gradient = 1
                if self.activation[0] == 3:  # differentiate x = 1
                    error_gradient = 1
                # weight update for recurrent neuron
                weight_update = self.lr * self.delta_sums[0][i] * error_gradient
                # print "Weight update4:", self.lr, "*", self.delta_sums[layer - 1][i], "*", \
                #     error_gradient, "=", weight_update
                delta_accumulate[0][self.layers[self.r_layer] * self.layers[0] + i] = \
                    delta_accumulate[0][self.layers[self.r_layer] * self.layers[0] + i] + weight_update
def train_one(self, epochs=100, error=0.01, steps=1, data=[]):
num_epochs = 0
error_tot = 1
self.hidden_prev = [0 for _ in range(self.layers[1])]
if steps > len(data[0]):
steps = len(data[0])
while num_epochs < epochs and error_tot > error: # run through data until trained or epoch finished
error_tot = 0.0
# unfold necessary layers
self.unfold(steps)
for t in range(len(data[0]) + 1 - steps): # run through all the data in time steps
# select data inputs
step_inputs = data[0][t: t + steps]
# run inputs through unfolded network
predicted_outputs = self.run_unfolded(steps, step_inputs)
step_outputs = data[1][t] # only 1 data output for time t
error_tot = error_tot + np.sum([np.power(self.loss_function(step_outputs[i], predicted_outputs[i]), 2)
for i in range(len(step_outputs))])/float(len(step_outputs))
# store cumulative weight updates
delta_accumulate = [[0 for _ in range(len(self.weights[i]))] for i in range(self.num_layers)]
# back-propagate error and store weight changes
self.backpropagate(steps, predicted_outputs, step_outputs, delta_accumulate, step_inputs)
# print "DA 0:", delta_accumulate[0]
# print "DA 1:", delta_accumulate[1]
# print "DA 2:", delta_accumulate[2]
# update weights in folded network
for i in range(len(delta_accumulate)):
for j in range(len(delta_accumulate[i])):
# if i <= steps * self.r_layer:
# delta_accumulate[i][j] = delta_accumulate[i][j] / float(steps)
self.weights[i][j] = self.weights[i][j] + delta_accumulate[i][j]
# get recurrent hidden inputs for next iteration using a folded run
self.run(data[0][t])
error_tot = error_tot/(len(data[0]) + 1 - steps)
print "At Epoch:", num_epochs, "-> Current error is:", error_tot
num_epochs = num_epochs + 1
start = timer()
# print rnn.run_sequence([[1, 0, 0], [0, 0, 1]])
# print rnn.run_start(length=5, inputs=[1, 0, 0])
# rnn.unfold(1)
# print rnn.run_unfolded(1, [[1, 0, 0]])
#
# rnn.unfold(2)
# print rnn.run_unfolded(2, [[1, 0, 0], [0, 1, 0]])
# rnn.unfold(1)
# rnn.run_unfolded(1, [[0, 0, 1, 0]])
# delta_accumulate = [[0 for _ in range(len(rnn.weights[i]))] for i in range(rnn.num_layers)]
# rnn.backpropagate(1, [1, 0, 0, 0], [0, 1, 0, 0], delta_accumulate, [[0, 0, 1, 0]])
# data = [[ # inputs
# [0, 0, 1, 0, 0], # 0
# [0, 1, 0, 0, 0], # 1
# [1, 0, 0, 0, 0], # 2
# [0, 1, 0, 0, 0], # 1
# [0, 0, 1, 0, 0], # 0
# [0, 0, 0, 1, 0], # -1
# [0, 0, 0, 0, 1], # -2
# [0, 0, 0, 1, 0], # -1
# [0, 0, 1, 0, 0] # 0
# ], [ # outputs
# [0, 1, 0, 0, 0], # 1
# [1, 0, 0, 0, 0], # 2
# [0, 1, 0, 0, 0], # 1
# [0, 0, 1, 0, 0], # 0
# [0, 0, 0, 1, 0], # -1
# [0, 0, 0, 0, 1], # -2
# [0, 0, 0, 1, 0], # -1
# [0, 0, 1, 0, 0], # 0
# [0, 1, 0, 0, 0] # 1
# ]]
data = [[
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0],
], [
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0],
[1, 0, 0, 0]
]]
# count = 9
# while count > 0:
rnn = RNN(layers=[4, 4, 4], lr=0.1, activation=[1, 0])
rnn.train_one(data=data, error=0.007, epochs=1000)
# rnn.train_one(steps=2, data=data, error=0.01, epochs=1000)
# rnn.train_one(steps=3, data=data, error=0.01)
# rnn.train_one(steps=4, data=data, error=0.01)
# rnn.weights[0] = [0.2632778982740025, 1.2873720206851751, 1.7626432978124789, -3.0595028774892534, 0.15098307779804732,
# 1.1126095474854287, 0.38110171779164076, 1.9364004780687225, 0.22235335481093579, -1.8002217381550798,
# -0.70558753999902091, 2.963382220443747, -2.2589462616850731, 0.59612435854378309, -0.35356446544534881,
# 0.5648778551910083, 0.17033704006778155, 0.5060839478807142, 0.25300547672626333, 0.32407602759647469,
# 6.0782465343454941, -1.0811725796870233, 1.1174273862804875, -1.1368003446516448, -4.9914125647680363,
# 0.73875113954700855, 1.0473265626731625, 0.38706194851792741, 1.4512352933257084, -0.024051087935752818]
# rnn.weights[1] = [1.2849544717182271, -0.82627784842497254, 1.9806972719590872, -1.3404860441159823, 0.063739576435029102,
# 0.12683126700871813, 0.65883610605310594, -1.5179039027101742, -1.154818724088198, 3.3767498133387361,
# -1.5787200645487796, 0.0043726966620622008, 2.7960435372210006, -0.64773434184637213, 0.65394967554423489,
# 2.3756590192967009, -2.0716277249803334, -1.5805960003075494, -1.3864512411867511, -2.7885305512808598,
# -2.9205075656527311, -0.52016340775199643, -2.5017437202256838, -1.1897163743027142, -0.17265447621757785,
# -1.1325017470971213, -1.0679391962839133, -0.4218239593885344, -0.97411887795767649, -1.4519510365432908]
# rnn.weights[2] = [-1.6989305308659794, 0.92151681407801267, 0.5222166625078084, 1.2249475740581155, 0.91207565544622293,
# 0.94521030510371584, 0.27289993394574985, -0.11237519483934363, 1.1090120092036855, -0.32718115038747075,
# 0.69846462864219272, -4.1601471690468106, -1.6198760439773012, 0.22878426496839163, 1.202923138143533,
# 0.26516957184275108, 0.66363284709514114, -0.64157228008573675, 1.0090422843555509, -0.70820431085934366,
# -3.1982733019630514, 0.92664594706516556, -1.1515511478851503, 0.089271475518160184, 2.0459073214622094]
# for val in rnn.weights:
# print val
# print [len(rnn.weights[i]) for i in range(len(rnn.weights))]
print ""
print "Expected:", data[1]
print "Actual:", rnn.run_sequence(data[0])
print ""
print "Expected:", data[1]
out_temp = rnn.run_start(length=4, inputs=data[0][0])
print "Actual:", out_temp
# count = 0
# for i in range(len(out_temp)):
# if not(out_temp[i] == data[1][i]):
# count = count + 1
#
# print count
end = timer()
print "Time taken:", (end - start)
| en | 0.53896 | # input-to-hidden layer weights # hidden-to-hidden layer weights # hidden-to-other and other-to-other layer weights # bias only for hidden and output # run a single input value through the RNN # go through all layers # current layer neurons # previous layer neurons # hidden recurrent layer neurons # adding previous hidden values # adding bias values # not softmax (softmax needs whole array) # softmax activation (softmax needs whole array) # run a time sequence through the RNN # transform softmax output into unicode input # run a time sequence through the RNN given only the starting value # transform softmax output into unicode input # start on second iteration # run RNN with previous outputs # transform softmax output into unicode input # add unfolded layers until recurrent layer # add one set of layers following the recurrent layer # run through all input and recurrent layers # run through all recurrent neurons # adding input layer neurons # adding previous hidden values # adding bias values # calculate output layer values for final hidden layer output # previous layer neurons # adding bias values # not softmax (softmax needs whole array) # softmax activation (softmax needs whole array) # first get error(s) at output # iterate backwards starting at the outputs and ending just before the recurrent layer # run through each neuron in current layer # softmax activation if necessary # store exp values of all sums # square the sum # sum all other terms # run through each neuron in previous layer # back-propagate error # calculate error gradient for current layer # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)] # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2 # differentiate relu(x) = 1 if sum > 0, otherwise 0 # differentiate x = x # differentiate softmax done earlier # print "Weight update0:", self.lr, "*", self.delta_sums[-1 - layer][i], "*", \ # error_gradient, "*", self.neurons_unfolded[-1 - layer - 1][j], "=", weight_update # 
calculate error gradient for current layer # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)] # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2 # differentiate relu(x) = 1 if sum > 0, otherwise 0 # differentiate x = x # differentiate softmax done earlier # print "Weight update1:", self.lr, "*", self.delta_sums[-1 - layer - 1][i], "*", \ # error_gradient, "=", weight_update # iterate backwards through the unfolded weights array, starting at the last recurrent layer # zero delta sums for previous layer # run through each neuron in current layer (recurrent layer) # run through each neuron in previous layer (recurrent layer) # don't back-propagate into input layer # back-propagate error to recurrent layers (only accessing recurrent layer weights) # calculate error gradient for current layer # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)] # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2 # differentiate relu(x) = 1 if sum > 0, otherwise 0 # differentiate x = 1 # weight update for recurrent neuron # print "Weight update2:", self.lr, "*", self.delta_sums[layer - 1][i], "*", \ # error_gradient, "*", self.neurons_unfolded[layer - 1][j], "=", weight_update # store at -1, since recurrent weights are stored at -1 # run through each neuron in previous layer (input layer) # calculate error gradient for current layer # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)] # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2 # differentiate relu(x) = 1 if sum > 0, otherwise 0 # differentiate x = 1 # weight update for input neuron # print "Weight update3:", self.lr, "*", self.delta_sums[layer - 1][i], "*", \ # error_gradient, "*", step_inputs[layer - 1][j], "=", weight_update # bias neuron update # calculate error gradient for current layer # differentiate sigmoid(x) = f(x): f'(x) = f(x)[1 - f(x)] # differentiate tanh(x) = f(x): f'(x) = sech(x)^2 = 1/cosh(x)^2 # differentiate relu(x) = 1 if sum > 0, otherwise 0 # 
differentiate x = 1 # weight update for recurrent neuron # print "Weight update4:", self.lr, "*", self.delta_sums[layer - 1][i], "*", \ # error_gradient, "=", weight_update # run through data until trained or epoch finished # unfold necessary layers # run through all the data in time steps # select data inputs # run inputs through unfolded network # only 1 data output for time t # store cumulative weight updates # back-propagate error and store weight changes # print "DA 0:", delta_accumulate[0] # print "DA 1:", delta_accumulate[1] # print "DA 2:", delta_accumulate[2] # update weights in folded network # if i <= steps * self.r_layer: # delta_accumulate[i][j] = delta_accumulate[i][j] / float(steps) # get recurrent hidden inputs for next iteration using a folded run # print rnn.run_sequence([[1, 0, 0], [0, 0, 1]]) # print rnn.run_start(length=5, inputs=[1, 0, 0]) # rnn.unfold(1) # print rnn.run_unfolded(1, [[1, 0, 0]]) # # rnn.unfold(2) # print rnn.run_unfolded(2, [[1, 0, 0], [0, 1, 0]]) # rnn.unfold(1) # rnn.run_unfolded(1, [[0, 0, 1, 0]]) # delta_accumulate = [[0 for _ in range(len(rnn.weights[i]))] for i in range(rnn.num_layers)] # rnn.backpropagate(1, [1, 0, 0, 0], [0, 1, 0, 0], delta_accumulate, [[0, 0, 1, 0]]) # data = [[ # inputs # [0, 0, 1, 0, 0], # 0 # [0, 1, 0, 0, 0], # 1 # [1, 0, 0, 0, 0], # 2 # [0, 1, 0, 0, 0], # 1 # [0, 0, 1, 0, 0], # 0 # [0, 0, 0, 1, 0], # -1 # [0, 0, 0, 0, 1], # -2 # [0, 0, 0, 1, 0], # -1 # [0, 0, 1, 0, 0] # 0 # ], [ # outputs # [0, 1, 0, 0, 0], # 1 # [1, 0, 0, 0, 0], # 2 # [0, 1, 0, 0, 0], # 1 # [0, 0, 1, 0, 0], # 0 # [0, 0, 0, 1, 0], # -1 # [0, 0, 0, 0, 1], # -2 # [0, 0, 0, 1, 0], # -1 # [0, 0, 1, 0, 0], # 0 # [0, 1, 0, 0, 0] # 1 # ]] # count = 9 # while count > 0: # rnn.train_one(steps=2, data=data, error=0.01, epochs=1000) # rnn.train_one(steps=3, data=data, error=0.01) # rnn.train_one(steps=4, data=data, error=0.01) # rnn.weights[0] = [0.2632778982740025, 1.2873720206851751, 1.7626432978124789, -3.0595028774892534, 
0.15098307779804732, # 1.1126095474854287, 0.38110171779164076, 1.9364004780687225, 0.22235335481093579, -1.8002217381550798, # -0.70558753999902091, 2.963382220443747, -2.2589462616850731, 0.59612435854378309, -0.35356446544534881, # 0.5648778551910083, 0.17033704006778155, 0.5060839478807142, 0.25300547672626333, 0.32407602759647469, # 6.0782465343454941, -1.0811725796870233, 1.1174273862804875, -1.1368003446516448, -4.9914125647680363, # 0.73875113954700855, 1.0473265626731625, 0.38706194851792741, 1.4512352933257084, -0.024051087935752818] # rnn.weights[1] = [1.2849544717182271, -0.82627784842497254, 1.9806972719590872, -1.3404860441159823, 0.063739576435029102, # 0.12683126700871813, 0.65883610605310594, -1.5179039027101742, -1.154818724088198, 3.3767498133387361, # -1.5787200645487796, 0.0043726966620622008, 2.7960435372210006, -0.64773434184637213, 0.65394967554423489, # 2.3756590192967009, -2.0716277249803334, -1.5805960003075494, -1.3864512411867511, -2.7885305512808598, # -2.9205075656527311, -0.52016340775199643, -2.5017437202256838, -1.1897163743027142, -0.17265447621757785, # -1.1325017470971213, -1.0679391962839133, -0.4218239593885344, -0.97411887795767649, -1.4519510365432908] # rnn.weights[2] = [-1.6989305308659794, 0.92151681407801267, 0.5222166625078084, 1.2249475740581155, 0.91207565544622293, # 0.94521030510371584, 0.27289993394574985, -0.11237519483934363, 1.1090120092036855, -0.32718115038747075, # 0.69846462864219272, -4.1601471690468106, -1.6198760439773012, 0.22878426496839163, 1.202923138143533, # 0.26516957184275108, 0.66363284709514114, -0.64157228008573675, 1.0090422843555509, -0.70820431085934366, # -3.1982733019630514, 0.92664594706516556, -1.1515511478851503, 0.089271475518160184, 2.0459073214622094] # for val in rnn.weights: # print val # print [len(rnn.weights[i]) for i in range(len(rnn.weights))] # count = 0 # for i in range(len(out_temp)): # if not(out_temp[i] == data[1][i]): # count = count + 1 # # print count | 3.048867 | 3 |